Columns:
query: string (lengths 9 to 3.4k)
document: string (lengths 9 to 87.4k)
metadata: dict
negatives: list (lengths 4 to 101)
negative_scores: list (lengths 4 to 101)
document_score: string (lengths 3 to 10)
document_rank: string (102 distinct values)
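A minimal sketch of how one row with the schema above could be expanded into training triplets, following the triplet objective [query, document, negatives] declared in each row's metadata. The loading mechanism is not specified here, so the sketch assumes rows arrive as plain Python dicts with the field names listed above.

def row_to_triplets(row):
    # Field names follow the column schema above.
    query = row["query"]              # natural-language query / docstring
    positive = row["document"]        # the matching code snippet
    negatives = row["negatives"]      # 4 to 101 non-matching snippets
    scores = row["negative_scores"]   # one score per negative, same length
    assert len(negatives) == len(scores)
    # One (anchor, positive, negative) triplet per negative snippet.
    return [(query, positive, neg) for neg in negatives]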
Class constructor that reads the text file "fname" to be modified.
def __init__(self,fname):
    try:
        self.handler = open(fname,'r')
        self.filename = fname
    except Exception:
        log.exception('Fallo al abrir ' + fname)
        return None
    self.content = self.handler.readlines()
    self.handler.close()
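A self-contained sketch of the constructor above. The row only shows __init__, so the enclosing class name, the logging setup, and the example path are assumptions added for illustration.

import logging

log = logging.getLogger(__name__)

class TextFileEditor:
    def __init__(self, fname):
        try:
            self.handler = open(fname, 'r')
            self.filename = fname
        except Exception:
            # Log the failure and leave the instance unpopulated.
            log.exception('Fallo al abrir ' + fname)
            return
        self.content = self.handler.readlines()
        self.handler.close()

# editor = TextFileEditor('some_file.txt')  # placeholder path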
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, fname):\n self.fname = os.path.abspath(fname)\n self.restore()", "def __init__(self, nombre):\n\n try:\n # Intentar abrir el archivo\n self.f = open(nombre, 'r')\n self.nombre = nombre\n except:\n # Si no se puede abrir el archivo, entonces se termina el programa\n print('No se puede abrir el archivo', nombre)\n exit()", "def __init__(self, file_name):\n self.file_name = file_name", "def __init__(self, filename):\n self.filename = filename\n self.file = open(filename, \"w\")", "def __init__(self, filename):\r\n\r\n self.filename = filename", "def __init__(self, filename):\n self.filename = filename", "def __init__(self,filename):\n\n\t\tself.filename = filename", "def __init__(self, filename):\n self._filename = filename\n pass", "def set_filename(self, file_name):", "def __init__(self, file: str):\n self._file = file", "def __init__(self, fname):\n if type(fname) == list:\n fname = fname[0]\n self.fname = fname\n self.iuvsfname = Filename(fname)\n self.hdulist = fits.open(self.fname)", "def __init__(self,file):\n self.file = file", "def __init__(self, file_path: str, mode: str = \"single\"):\n self.file_path = file_path\n super().__init__(mode=mode)", "def __init__(self, name=''):\r\n if name:\r\n self.file_name = name + '.md'\r\n self.file = open(self.file_name, 'w+', encoding='UTF-8')\r\n self.file.close()", "def __init__(self, file):\n self._file = file", "def setFile(self, filename): #$NON-NLS-1$\r", "def __init__(self, filename=None):\n self._filename = filename", "def __init__(self, filename, validate=True):\n pass", "def __init__(self, file: IO, mode: str = \"single\"):\n self.file = file\n super().__init__(mode=mode)", "def __init__(self, fileName, fpga):\n self.fileHandle = open(fileName + '.tcl', 'a+')\n self.fpga = fpga", "def __init__(self, file, prefix = None):\n self.file = file\n self.prefix = prefix", "def __init__(self, fn: str):\n\n try:\n self.file = open(fn)\n except IOError:\n print(\"File {} not found\".format(fn))\n print(\"Exiting\")\n sys.exit(0) # can't go on", "def __init__(self, project, fname):\n if len(fname) < 4 or fname[-4:] != '.omf':\n fname = fname + '.omf'\n self.fname = fname\n with open(fname, 'wb') as fopen:\n self.initialize_header(fopen, project.uid)\n self.project_json = project.serialize(open_file=fopen)\n self.update_header(fopen)\n fopen.write(json.dumps(self.project_json).encode('utf-8'))", "def openFile(self, fname):\n self._fname = fname\n self._fid = open(fname, \"rb\")", "def open(self):\n self.f = open(self.join(self.fname), 'rb')", "def __init__(self, datafile):\n self.FILE = open(datafile, 'w')", "def __init__(self, file_path):\r\n self.file_path = Path(file_path)\r\n self.fname = self.file_path.name\r\n self.d_stgs = settings.DisplaySettings()\r\n self.c_stgs = settings.CalculationSettings()\r\n logger.info(f'{self} create')", "def __init__(self, file_path, label):\n self.file_path = file_path\n self.label = label\n super(LabeledFileRecord, self).__init__()", "def setFile(self, filename):\n self.prepare() #new call on each new file to process\n self.filename = \"%s\" % filename", "def __init__(self, filepath):\n self.filepath = filepath", "def __init__(self, filename, registry):\n self.filename = filename\n self.registry = registry", "def open_file(self, fname):\n\n # Save that the file is opened.\n self.open_files[fname] = {}\n self.open_files[fname][\"name\"] = fname\n self.open_files[fname][\"contents\"] = []", "def __init__( self, fileName ):\n\n if not os.path.exists( fileName ):\n raise RepAcsError, 
repr(fileName) + \" file does not exist.\"\n\n self.fileName = fileName\n self.db = pnlDb.PnlDb( fileName, 'old' )", "def __init__(self, fileName):\n\t\t\n\t\tself.file = fileName\n\t\t\n\t\ttry:\n\t\t\tfile = open(fileName[:-1], 'r')\n\t\texcept IOError:\n\t\t\thandle_error(ERR_IO + \" \" + fileName, USAGE_STR)\n\t\telse:\n\t\t\tfileLines = file.readlines()\n\t\t\tfile.close();\n\t\t\tprint fileLines", "def __init__(self, fname, endian='<', checksum_size=None, debug_level=0):\n self.endian = endian\n self.f = open(expanduser(fname), 'rb')\n self.f.seek(0, 2)\n self.fsize = self.tell()\n self.f.seek(0, 0)\n self.close = self.f.close\n if checksum_size:\n pass\n #self.cs = checksum(self, 0, checksum_size)\n else:\n self.cs = checksum_size\n self.debug_level = debug_level", "def __init__(self, fileR):\n self.__fileR=fileR", "def __init__(self, line_parser, *filename):\n \n self.line_parser = line_parser\n self.f = fileinput.input(filename)", "def __init__(self, path, file):\n self.path = path\n self.file = file", "def __init__(self, fpath, dup):\n self.DUP = dup\n if not os.path.exists(fpath):\n raise FileNotFoundError(ENOENT, os.strerror(ENOENT), fpath)\n self.fpath = fpath", "def __init__(self,file_reader):\n self.file_reader = file_reader", "def from_file(self, filename=None):\n if not self.name:\n #we don't have a file associated with the EntryList:\n if not filename:\n print \"UNKNOWN FILE!\"\n exit\n else:\n self.name = filename\n \n elif filename and filename != self.name:\n #ambiguous which file to use\n print \"different file than what log was initialized with\"\n exit\n \n else:\n #we have an original filename and none passed in\n #or the original filename equals the one passed in\n #should be good to go\n pass\n\n if os.path.exists(self.name):\n\n #f = open(self.name, \"U\")\n #2009.04.02 20:44:31 \n #very strange behavior when opening up utf-8 files\n #characters get reincoded\n #this is especially prominent when using check_feed.py\n #was using latin_1... 
going back to utf-8\n #f = codecs.open(self.name, encoding='latin_1')\n #codecs.ignore_errors(UnicodeDecodeError) \n f = codecs.open(self.name, encoding='utf-8', errors='ignore')\n\n self.write(f.read())\n f.close\n\n self.seek(0)\n\n else:\n print \"NO FILE ASSOCIATED WITH LOG: %s\" % self.name", "def __init__(self, path_to_the_file):", "def __init__(self, path_to_the_file):", "def __init__(self, username, file_path):\r\n self.username = username\r\n self.file_path = file_path", "def save(self, fname):\n pass", "def __init__(self, filename='/var/humtemp/file.csv'):\n self.filename = filename", "def __init__(self, fname, normalize_names=True):\n self.file = fname\n self.folder = os.path.dirname(fname)\n self.root = read_standard_xml(fname)\n self.name = self.root.get(\"model\")\n self.worldbody = self.create_default_element(\"worldbody\")\n self.actuator = self.create_default_element(\"actuator\")\n self.asset = self.create_default_element(\"asset\")\n self.equality = self.create_default_element(\"equality\")\n self.contact = self.create_default_element(\"contact\")\n self.default = self.create_default_element(\"default\")\n self.compiler = self.create_default_element('compiler')\n self.option = self.create_default_option()\n self.resolve_asset_dependency()\n if normalize_names:\n self.normalize_names()", "def __setFileName(self,\n fileName):\n self.__fileName = fileName\n return self", "def __init__(self, filename=None, logging=True):\n self.birthday = str(int(time.time()))\n #\n # Used during .write(), override only if absolutely necessary.\n self.linesep = '\\n'\n #\n # Create a local log object to track actions.\n self.log = self.Log(logging=logging)\n self.log('init(filename={0}):'.format(filename))\n #\n # The list where contents of the file are stored\n self.contents = list()\n #\n # Accept filename during instantiation, default is None.\n self.filename = filename\n #\n # Declare current state is original data from self.filename.\n # This is set to False during .read() and .write()/.save()\n # Any method that alters self.contents changes this to True.\n self.changed = False\n #\n # Automatically sort file on read()\n self.sorted = False\n #\n # Ensure file contents are always unique.\n self.unique = False\n #\n # If you gave me a file to read when instantiated, then do so.\n if self.filename is not None:\n self.read(self.filename)", "def __init__(self, input_filename='input.txt', output_filename='output.txt'):\n self._input = input_filename\n self._output = output_filename\n self._fin = open(self._input, 'r')\n self._fout = open(self._output, 'w')", "def __init__(self, obj):\n super().__init__(\"File {} doesn't exist or invalid.\".format(obj))", "def __init__(self):\n self.file_name = 'moes_tavern_lines.txt'\n self.path_to_file = abspath(join(getcwd(), '../data',\n self.file_name))", "def __init__(self, path):\r\n self.path = path\r\n \"\"\" If the file doesn't exist, touch it \"\"\"\r\n open(self.path, 'w').close()", "def __init__(self, fp):\n self._fp = fp", "def __init__(self, fname=None, Umean=None, verbose=False, **kwargs):\n super(self.__class__,self).__init__(verbose,**kwargs)\n self.Umean = Umean\n\n if fname is not None:\n self.read_field(fname)", "def __init__(self, filename):\n config.log.critical(\"Failed to make the new database\")\n config.log.critical(\"This path is not valid or is inaccesible?\")\n config.log.critical(\"Tried to open: '%s'\" % filename)", "def __init__(self,file=None):\n\t\tassert file!=None , 'file argument should not be 
None'\n\t\tself.file=file\n\t\tself.raw_text=False\n\t\tself.text =False", "def __init__(self,file=None,fromtext=None):\n\t\tassert file!=None or fromtext!=None, 'either file or fromtext argument should not be None'\n\t\tif file!=None:\n\t\t\tself.file = file\n\t\t\tself.raw_text = FMTextFileManagement.open(self.file)\n\t\telse:\n\t\t\tself.file=None\n\t\t\tself.raw_text = fromtext\n\t\tself.text =False", "def __init__(self, file):\n self.file = file # maybe should be a folder?\n self.file = self.file.rsplit('.')[0] + \".asm\"\n self.name = None\n self.func_name = None\n self.if_count = 0\n self.call_count = 0", "def __init__(self, file_path: str):\n\n super().__init__(file_path)\n self.reader = None", "def __init__(self, filename, overwrite=False, autocommit=False):\n if overwrite:\n self.file = diskfile.open2(filename, \"w+b\")\n else:\n try:\n self.file = diskfile.open2(filename, \"r+b\")\n except FileNotFoundError:\n self.file = diskfile.open2(filename, \"x+b\")\n self.autocommit = autocommit\n self.revert()", "def __init__(self, name, fn):\n self.name = name\n self.fn = fn", "def set_new_filedetails(self, name, path):\n File.filename(name)\n File.filepath(path)", "def __init__(self, filename, mode=ID3_FILE_READ):\n\n\t\tif not os.path.exists(filename):\n\t\t\traise ID3ParameterException(\"File not found: %s\" % filename)\n\n\t\t\n\t\tif mode == ID3_FILE_READ:\n\t\t\tself._f = open(filename, 'rb')\n\t\telif mode in [ID3_FILE_MODIFY, ID3_FILE_NEW]:\n\t\t\tself._f = open(filename, 'r+b')\n\t\telse:\n\t\t\traise ID3ParameterException(\"invalid mode\")\n\n\t\t\n\t\tself._filename = filename\n\t\tself._tag = {'songname':'',\n\t\t\t 'artist':'',\n\t\t\t 'album':'',\n\t\t\t 'year':'',\n\t\t\t 'comment':'',\n\t\t\t 'genre':0,\n\t\t\t 'track':0\n\t\t\t }\n\n\t\tif mode != ID3_FILE_NEW:\n\t\t\tself.parse()", "def __init__(self, nome, sobrenome, cpf):\n self._nome = nome\n self._sobrenome = sobrenome\n self._cpf = cpf", "def __init__(self, fname, procs, state, info=None):\n self._fname = fname\n self._state = state\n (self._ff,\n self._info) = self._prepare(fname, procs, info)\n self._coords = {}", "def __init__(self, file_name=None):\n self.file_name = file_name\n self.frd = None\n self._steps = []\n if file_name is not None:\n self.load(file_name)", "def __init__(self,filename,**kwargs):\n modlogger.debug( \"LF:%s %s\"%(filename,kwargs))\n self.fname = filename\n reserve = kwargs.get(\"reserve\",DEFAULT_LOGSZ)\n reserve = max(reserve,MINIMUM_LOGSZ)\n self.ro = kwargs.get(\"readonly\",False)\n\n mode = \"wb+\" if not self.ro else \"rb\"\n self.fd = open(filename,mode)\n #FIXME: In the case that this is a new file\n #we should sync the directory fd (which means opening one).\n \n #We detect sparse logfile, and dont really support\n # them properly.\n self.sparse = False\n self.not_complete_lock = threading.Lock()\n self.not_complete = []\n self.closing = False \n self.outstanding = threading.Condition(self.not_complete_lock)\n \n if not self.ro: self._reserve(reserve)", "def _fromFile(self,filepath, filename):\n pass", "def __init__(self, path=None, filename=\"filename\", prefix: str = None):\n\n if not prefix:\n prefix = \"\"\n self._prefix = prefix\n\n def_pwd(path)\n\n self._path = path\n self._filename = \"\"\n self.default_filename = filename\n self._file_list = []", "def __init__(self, fname, lockduration=10, verbosity=0):\n self._verbosity = verbosity\n self._lockduration = lockduration\n fname = op.normpath(fname)\n self._basedir = op.dirname(fname)\n self._lockfilename = \"%s.lock\" % 
op.basename(fname)\n self._uniquename = \",\".join((\n self._lockfilename, socket.getfqdn(), str(os.getpid()),\n str(uuid.uuid4())[-12:],\n ))\n self._uniquefile_created = False\n self._p(\" File to lock: %s\" % fname)\n self._p(\"Lockfile name: %s\" % self._lockfilename)\n self._p(\" Unique name: %s\" % self._uniquename)", "def from_text_file(cls, filename):\n raise NotImplementedError()", "def __init__(self, file_path):\n self.file_path = file_path\n try:\n self.FastaFile = pysam.FastaFile(self.file_path)\n except:\n raise IOError(\"REFERENCE FILE ERROR\")", "def __init__(self, filename, default_sep=None):\n self.filename = filename\n if default_sep is not None:\n self.separator = default_sep", "def __init__(self, filepath):\n self.filepath = filepath\n with open(filepath) as f:\n self.lines = f.readlines()\n #self.lines = open(self.filepath).readlines()", "def __init__(self, filename_or_data, mode='r'):\n\n if mode[0] in ['r', 'a', 'w']:\n if mode == 'r':\n # force universal read mode\n mode = 'rU'\n self.__fobj = open(filename_or_data, mode)\n elif mode == 'f':\n self.__fobj = filename_or_data\n elif mode == 's':\n self.__fobj = StringIO.StringIO(filename_or_data)\n else:\n msg = \"mode string must start with 'r', 'a', 'w', 'f' or 's', \\\n not '%s'\" % mode[0]\n raise ValueError(msg)\n self.__mode = mode", "def __init__(self, name, mode='a', **kw):\n file_exists = op.isfile(name)\n self._mode = mode\n self._filename = name\n self._kw = kw\n self._registry = InserterRegistry()\n\n # Check existence\n if mode in ('r', 'r+') and not file_exists:\n msg = \"File '{}' does not exist\".format(name)\n raise IOError(msg)\n\n # Check the header when mode requires the file to exist\n if mode in ('r', 'r+') or (file_exists and mode == 'a'):\n with self._h5file('r') as h5file:\n error_if_bad_header(h5file)\n\n # Check that file is writable when mode will write to file.\n if mode != 'r':\n with self._h5file('a') as h5file:\n error_if_not_writable(h5file)\n\n # Create the header if this is a new file\n if mode in ('w', 'w-', 'x') or (not file_exists and mode == 'a'):\n with self._h5file(mode) as h5file:\n write_header(h5file.attrs)", "def __init__(self, nom, chemin):\n self.nom = nom\n self.path = chemin", "def __init__(self, filename=None, **kwargs):\n self.filename = filename\n if filename:\n self.read(**kwargs)", "def setStatiFile(self, filename):\n self.statiFile = filename", "def __init__(self, filename, unique_keys):\n self.filename = filename\n self.data = None\n self.unique_keys = sorted(list(set(unique_keys)))\n self._reload()", "def __init__(self, file_name=None, file_object=None, pdb_code=None):\n self.line_number = 0\n if file_name is not None:\n assert file_object is None\n assert pdb_code is None\n self.file_object = open(file_name)\n elif file_object is not None:\n assert pdb_code is None\n self.file_object = file_object\n elif pdb_code is not None:\n self.file_object = mmcif_files.getFile(pdb_code)\n else:\n raise ValueError(\"No input file given\")", "def __init__(self, file_name, key):\n try:\n self._file_name = file_name\n self._encryptor = AES(key.encode())\n self._document = open(self._file_name, \"rb+\")\n except Exception as error:\n print(error)\n sys.exit(1)", "def __init__(self, tailed_file):\n self.tailed_file = tailed_file\n self.check_file_validity()\n self.tailed_file = tailed_file", "def SetFilename(self, f):\n self._filename = f", "def change_file_name(self, n):\n if type(n) != str or n is None:\n raise TypeError(\"Wrong type! 
Please pass 'n' as a string!\")\n self.name = n", "def read_file(self, fname, name):\r\n self.filename = name\r\n if fname != \".\":\r\n self.fname = f\"{fname}\\\\\"\r\n self.pathread = os.path.join(self.p, self.fname)\r\n else:\r\n self.pathread = self.p\r\n try:\r\n self.path = os.path.join(self.pathread, self.filename)\r\n with open(self.path, 'r') as read:\r\n self.data = read.readlines()\r\n except Exception as error:\r\n return error\r\n finally:\r\n send = \" \".join(self.data)\r\n return send", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def __init__(self, fname: str, **kwargs) -> None:\n self.filename = fname # Cache the filename when we call read()\n self.verbose = kwargs.pop('verbose', False)\n self.mode = kwargs.pop('mode', 'r')\n\n if self.mode not in ('r', 'w'):\n raise ValueError('Invalid mode [%s], must be one of [r] or [w]' % str(self.mode))\n\n # load file pointer\n try:\n self.fp = h5py.File(self.filename, self.mode)\n except:\n raise RuntimeError('Failed to open HDF5 file [%s]' % str(self.filename))\n\n self._init_data()", "def __init__(self):\n self.fileReader = FileReader()\n self.path = os.path.abspath(\"\")", "def from_file(cls, path_src):\n cp_cond = [os.path.exists(path_src), os.path.isfile(path_src),\n len(path_new) != 0]\n content = \"\"\n\n # read input from file\n if cp_cond[0] and cp_cond[1]:\n with open(path_src) as f:\n content = f.read()\n\n # connect object with file content\n return cls(path_src, inp_string=content, to_file=False)", "def __init__(self, filename):\n if '.tex' in filename:\n self._filename = filename\n self._title = filename.replace('.tex', '')\n print(\"Title of presentation is\", self._title.replace(\"_\", \" \"))\n else:\n self._title = filename\n self._filename = filename + '.tex'\n print(\"Filename is\", self._filename)\n # Check to see if filename or filename.tex exists\n if os.path.exists(self._filename):\n with open(self._filename) as tex_file:\n self._latex_str = tex_file.read()\n else:\n self._latex_str = ''\n # self._latex_str =", "def __init__(self, name: str, content: Optional[str] = None, kind: Optional[FileType] = None):\n self.name = name\n self.path = Path(name)\n self._content = content\n # TODO(ssbarnea): implement kind detection when not provided\n self.kind = kind", "def __init__(self, filename, exoid, offset=1):\n self.filename = filename\n self.exoid = exoid\n self._o = offset\n\n pass", "def _set_filename(self, filename):\n tmp_file = '_'.join(filename.split())\n# new_file = new_file.replace(\"'\",\n# '_').replace('-',\n# '_').replace(' ',\n# '_').replace('(', '_').replace(')', '_')\n new_file = ''\n pathsep = os.path.sep \n if sys.platform == 'win32':\n pathsep = '/'\n for char in tmp_file:\n if char.isalnum() or char in ['.', '_', ':', pathsep, '-']:\n new_file += char\n try:\n shutil.copy(filename, new_file)\n except shutil.Error, err:\n msg = \"`%s` and `%s` are the same file\" % (filename, new_file)\n if str(err) == msg:\n pass\n else:\n raise err\n utils.ensure_file_exists(new_file)\n self._filename = new_file\n self._basename, self._ext = os.path.splitext(self._filename)", "def __init__(self, x):\n self.txt = x\n self.summary = x + '.summary'\n if not os.path.exists(self.summary):\n log.warning('summary file not exists: {}'.format(self.summary))", "def __init__(self, fileobject, filename=''):\n self.fh = fileobject\n self.filename = 
filename\n self.header = {}\n self.data = None\n self._data_corr = None\n self._bo = ''", "def __init__(self, name, file_path, unit_of_measurement, value_template):\n self._name = name\n self._file_path = file_path\n self._unit_of_measurement = unit_of_measurement\n self._val_tpl = value_template\n self._state = None", "def newFile(self):\n self.open_file_name = None\n self.ui.main_edit.setText(\"\")\n self.saveEnabled(False)", "def new_file(self):\r\n self.filename = QFileDialog.getSaveFileName(\r\n None, 'Title', '', 'TXT (*.txt)'\r\n )\r\n if self.filename[0]:\r\n self.currentfile = open(self.filename[0], 'w')\r\n (self.base_name, self.ext) = os.path.splitext(self.filename[0])\r\n self.FilePath.setText(self.filename[0])" ]
[ "0.7283871", "0.7172157", "0.6912619", "0.6812134", "0.6719622", "0.66636246", "0.6611331", "0.6509861", "0.65031904", "0.6496384", "0.64787287", "0.64747083", "0.64125896", "0.63539714", "0.63372934", "0.62784576", "0.6267829", "0.6260039", "0.62426263", "0.6211658", "0.6190379", "0.6162438", "0.6158899", "0.61526823", "0.6149002", "0.6145333", "0.61448723", "0.61422205", "0.6117604", "0.6117386", "0.60917234", "0.60859066", "0.6081848", "0.6065615", "0.6057123", "0.60430074", "0.6040526", "0.6034807", "0.6030501", "0.602761", "0.6017081", "0.6012726", "0.6012726", "0.5995507", "0.59788007", "0.59612197", "0.5960595", "0.5955402", "0.59293765", "0.5913332", "0.5882925", "0.587216", "0.5853342", "0.584759", "0.58438313", "0.58168536", "0.58068186", "0.578604", "0.5765904", "0.5763841", "0.5763131", "0.57620025", "0.5758134", "0.57536924", "0.5745984", "0.5745354", "0.5742891", "0.5742118", "0.57375365", "0.57349116", "0.5717995", "0.5714596", "0.57026136", "0.5701502", "0.56970733", "0.5696758", "0.5687963", "0.56827116", "0.5675463", "0.5673979", "0.5672183", "0.56579566", "0.5645071", "0.5644978", "0.5643323", "0.5642851", "0.5640347", "0.56326425", "0.56296927", "0.56295997", "0.5629556", "0.56283736", "0.56278497", "0.56249136", "0.56204915", "0.561941", "0.5616986", "0.5611655", "0.55997056", "0.559716" ]
0.67274874
4
Inserts a string right where the string "tag" is found.
def ins(self,tag,nstr,jumpline=True):
    tmp = []
    strj = '\n' if jumpline else ''
    for line in self.content:
        tmp.append(line)
        if tag in line:
            tmp.append(nstr + strj)
    self.content = tmp
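A standalone sketch of the insertion behaviour shown above, applied to a plain list of lines. The function and variable names here are illustrative and not taken from the dataset.

def insert_after_tag(content, tag, nstr, jumpline=True):
    tmp = []
    strj = '\n' if jumpline else ''
    for line in content:
        tmp.append(line)
        if tag in line:
            # The new string is appended right after any line containing the tag.
            tmp.append(nstr + strj)
    return tmp

lines = ['# header\n', '# TAG: settings\n', 'value = 1\n']
print(insert_after_tag(lines, 'TAG', 'added = True'))
# ['# header\n', '# TAG: settings\n', 'added = True\n', 'value = 1\n']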
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_tag_data(tag):\n\n add_tag = Tag(tag=tag)\n db.session.add(add_tag)\n try:\n db.session.commit()\n except (Exception, exc.SQLAlchemyError, exc.InvalidRequestError, exc.IntegrityError) as e:\n print(tag + '\\n' + str(e))", "def AddTag(self, tag):\n\n if not self.persistant:\n return\n\n self.db.ExecuteSql('insert into tags(tag, track_id) values(\"%s\", %d);'\n %(tag, self.persistant['id']))\n self.db.ExecuteSql('commit;')", "def _insert_tag_thing(cur, tag_id, thing_id):\n cur.execute(dbq.INSERT_TAG_THING, [tag_id, thing_id])\n logger.debug(\"Linked tag_id '{}' and thing_id '{}'\".format(tag_id, thing_id))", "def initiate_new_tag (self,tag,key):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n self.tag_dict[tag] = {key}\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key)\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO tags_to_keys\"\r\n +\" (notebook, tag, keyword)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)", "def add_tag (self,tag,key):\r\n\r\n #with shelf\r\n\r\n if self.using_shelf:\r\n\r\n if tag in self.tag_dict:\r\n\r\n self.tag_dict[tag].add(key)\r\n\r\n else:\r\n\r\n self.tag_dict[tag] = {key}\r\n\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO tags_to_keys \"\r\n +\"(notebook, tag, keyword) \"\r\n +\"VALUES (?,?,?);\",value_tuple)", "def add_tag(self, transaction, citation_handle, tag_handle):\n citation = self.dbstate.db.get_citation_from_handle(citation_handle)\n citation.add_tag(tag_handle)\n self.dbstate.db.commit_citation(citation, transaction)", "def insert(self, data):\r\n pass", "def tag():\n conn = create_conn()\n c = conn.cursor()\n tags = load_emails()\n c.executemany('INSERT INTO tags (filepath, tag) VALUES (?,?)', tags)\n conn.commit()\n conn.close()", "def append(self, tag):\r\n self.insert(len(self.contents), tag)", "def test_add_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['tag1', 'tag2', 'Mediterranean_Basin'])\n assert (fc.features[0]['properties']['tags'] ==\n 'Adriatic_Sea;Mediterranean_Basin;tag1;tag2')\n\n self.check_feature(fc.features[0])", "def insertTagLink(self, tag_id, image_id, tag_data=None, notes=None):\n\t\t#Link a tag to an image\n\t\tsql=\"REPLACE INTO tag_links (tag_id, image_id, user_id, tag_data, notes) VALUES (%s, %s, %s, %s, %s)\"\n\t\tself.query(sql, values=(tag_id, image_id, self.user_id, tag_data, notes))", "def tag_card(self, card, tag: str):\n list = self.tags[tag]\n list.append(card.multiverse_id)\n self.db.tag_card_add(tag, card.multiverse_id)", "def add_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.append(tag)\n self.write_tag_index(list(set(tags)))", "def tag(self, tag):\n self.tag = tag", "def add_tag_to_db():\n new_tag = Tag(name=request.form['name'])\n\n db.session.add(new_tag)\n db.session.commit()\n\n flash(f\"Tag '{new_tag.name}' was successfully added\")\n\n return redirect('/tags')", "def add_tag(self, tag):\n\n # directional relation: tag is the blank of everything in the list\n self.relations[tag] = {\n \"overlord\": [],\n \"hegemon\": [], # for tributary\n \"tributary\": [],\n \"vassal\": [],\n \"guaranteeing\": [],\n \"guarantor\": [],\n \"alliance\": [],\n \"senior\": [],\n \"junior\": [],\n \"marriage\": []\n }", "def add_tag(e, driver):\n tag = random_tag(8)\n e.find_element_by_class_name('add-tag').click()\n \n driver.find_element_by_class_name('tag_input')\\\n .send_keys(tag)\n 
driver.find_element_by_class_name('tag_input')\\\n .send_keys(Keys.ENTER)\n # driver.find_elements_by_class_name('save-tag').click()\n return tag", "async def addtags(self, ctx, tag, *, data):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\tTag[tag] = self.Conf.Tags\r\n\t\t\tawait ctx.send('Added Tag: {}'.format(tag))\r\n\t\telse:\r\n\t\t\tawait ctx.send('Edited Tag: '.format(tag))\r\n\r\n\t\tnowgmt = time.strftime(\"%H:%M:%S, %d/%m/%Y\", time.gmtime())\r\n\t\t\r\n\t\tTag[tag]['user'] = ctx.author.id\r\n\t\tTag[tag]['data'] = data\r\n\t\tTag[tag]['time'] = nowgmt\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)", "def tag(self, text):\n\t\tpass", "def tag(self, tag):\n \n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n #Handle this better?\n return\n \n if isinstance(tag, six.string_types):\n tname = tag\n try:\n tag = Tag(owner=self.owner, name=tag)\n tag.save()\n except IntegrityError:\n tag = Tag.objects.get(slug=makeslug(tname), owner=self.owner)\n \n tag.save() # If this isn't here there are crashes for some reason\n self.tags.add(tag)", "def tag(self, sent):\n # WORK HERE!!", "def add(self, tag):\n self.tags[tag.name] = tag", "def insert(self):\n pass", "def add_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"POST\", url, headers=headers, data=payload)", "def tagger():", "def add_tag(self, tag):\n self.tags.append(tag)", "def insert_to_database(self, db):\n \n self.remove_bad_characters()\n print(\"Inserting \"+self.categorie_name+\" to database.\")\n db.query(\"INSERT INTO categorie (categorie_name) VALUES (:categorie_name)\", \\\n categorie_name=self.categorie_name)", "def insert(self, word):\n now = self.tree\n for i in word:\n now[i] = now.setdefault(i,{})\n now = now[i]\n now['end']=True", "def insert_tag(self, tagname, attrs=[], text=\"\",\n autoclose=False, newline=False):\n if autoclose:\n self.str += '\\n<%s%s%s>' % (tagname, string_for_attrs(attrs),\n ' /' if self.is_xml else '')\n else:\n self.push_tag(tagname, attrs)\n if text:\n if newline:\n self.insert_text('\\n' + indent(text, self.indent_level()))\n else:\n self.insert_text(text)\n self.pop_tag(newline=newline)", "def on_insert(self) -> None:", "def add_tag(self, key, value=''):\r\n status = self.connection.create_tags([self.id], {key : value})\r\n if self.tags is None:\r\n self.tags = TagSet()\r\n self.tags[key] = value", "def add_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError", "def add_tag(self, session, tag):\n self._tag(session.put, key=tag, session=session)", "def insert(self, key, value):\n tags = self.__all_tags()\n if value not in tags:\n tags.insert(key, value)\n self.__post_changes(tags)", "def update_tag(tag):\n remove_tag(tag)\n add_tag(tag)", "def createTag(self, authenticationToken, tag):\r\n pass", "def put_tag(self, key, tag):\n self._entries[key] = tag", "def setTag(self, tag, btag_obj):\n\n # Search for the tag\n if (len(self._tagmap) > 0):\n i = bisect.bisect_left(self._tags, tag)\n # Tag exists -> store new value\n if i != len(self._tagmap) and self._tagmap[i][0] == tag:\n self._data[self._tagmap[i][1]] = (tag, btag_obj)\n return\n self._tagmap.append((tag, len(self._data)))\n self._tags.append(tag)\n if 
len(self._tagmap) > 1:\n self._tagmap.sort(key=lambda x: x[0])\n for i in range(len(self._tagmap)):\n self._tags[i] = self._tagmap[i][0]\n self._data.append((tag, btag_obj))", "def add_new_tag():\n\n return render_template('create-tag.html')", "def create_tag(self, entry_name, tag):\n return self.__datacatalog.create_tag(parent=entry_name, tag=tag)", "def InsertData(conn, task):\n # Ejemplo:\n # task = (i, symbol, baseAsset, quoteAsset, status)\n # conectbd.InsertData(conn=conectbd.create_connection(), task=task)\n\n sql = ''' INSERT INTO criptomonedas(id,symbol,baseAsset,quoteAsset,status)\n VALUES(?,?,?,?,?) '''\n cur = conn.cursor()\n cur.execute(sql, task)\n conn.commit()", "def add_new_tag():\n\n name = request.form.get('name')\n\n new_tag = Tag(name=name)\n db.session.add(new_tag)\n db.session.commit()\n\n return redirect(f'/tags')", "def guardar_tag(self, tipo_tag, data, marcar_ro):\n self._info(\"guardar_tag\")\n return self.controller.guardar_tag(tipo_tag, b64decode(data),\n marcar_ro)", "def create_tag_with_entry(title):\n tag = Tag.objects.create(title=title)\n tag.save()\n tag.entry.add(1)\n return tag", "def add_tags_to_photo(self, photo_id, tag_list):\n print('\\nHello from add_tags_to_photo, the tag list is: ', tag_list)\n\n # for each tag\n # check if the tag is in the database already\n # if it is not then add it to the tag table\n for tag in tag_list:\n\n # will return None if the tag is not in the tag table\n # tag_name is the column name\n data = self.db.get_row('tag', 'tag_name', tag)\n\n print('data is', data)\n\n if data is None:\n\n print('\\nthat value {} is not in the db\\n'.format(tag))\n\n self.db.make_query(\n '''\n insert into tag (tag_name, user_id, photos)\n values (\"{}\", \"{}\", {})\n '''.format(\n tag,\n '28035310@N00',\n self.get_photo_count_by_tag(tag)\n )\n )\n\n print('\\nshould be added now...\\n')\n\n if self.db.get_row('tag', 'tag_name', tag):\n print('\\nadded tag, ', tag, '\\n')\n\n # UNIQUE constraint can cause problems here\n # so catch any exceptions\n try:\n # The tag is now in the database.\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n except Exception as e:\n print('Problem adding tag to photo_tag ', e)\n\n data = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n tags_in_data = []\n if len(data) > 0:\n for tag in data:\n tags_in_data.append(tag[1])\n\n print(tags_in_data)\n for tag in tag_list:\n if tag not in tags_in_data:\n return False\n else:\n self.update_photo_count(tag)\n\n return True", "def insertar(self,valor):\n self.stack.append(valor)\n print(\"El valor ha sido agregado correctamente\")", "def insert(self, name, address, city, state, zipcode, hour, phone, rating, image):\r\n pass", "def add_tag(request):\n if request.method != 'POST':\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n else:\n img_id = request.POST['id']\n try:\n img = Image.objects.get(pk=img_id)\n except:\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n tag_val = request.POST['tag']\n try:\n tag = tag_utils.TagsFromText(tag_val)[0]\n added = True\n img.tags.add(tag)\n img.save()\n except:\n added = False\n resp = rest.rest_success(request, img_id)\n respJson = json.loads(resp.content)\n respJson['added'] = added\n resp.content = json.dumps(respJson)\n return resp", "def after_insert(self, obj, st):\n pass", "def load_tags(tag_list):\n\n tag_insert = \"INSERT INTO release_tag VALUES\" \\\n \" 
(?, ?, ?, ?, ?, ?)\"\n dbutils.load_list(tag_insert, tag_list, DATABASE_FILE)", "def handle_add_new_tag():\n tag = Tag(name=request.form['name'])\n\n db.session.add(tag)\n db.session.commit()\n\n return redirect('/tags')", "def insert(self, b):\n self.liste.append(b)", "def GachaCraftNodeExcelAddTag_(builder, Tag_):\n return AddTag_(builder, Tag_)", "def insertPotenziale(self,query_id,l):\r\n\t\tfor i in l:\r\n\t\t\tprint \"inserisco potenziale {0} per query: {1}\".format(i,query_id)\r\n\t\t\tself.session.add(Potenziale(query_id,i[0],i[1]))\r\n\t\t\t#self.session.commit()\r", "def populateSQlite(tagDf): \n conn = sqlite3.connect(os.path.join(prefix, args.db))\n with conn:\n cur = conn.cursor()\n cmds = ['INSERT INTO value VALUES(%d, \\\"%s\\\", %d);' % (r[0], r[1], r[2]) for i, r in tagDf.iterrows()]\n cmds = \"\\n\".join(cmds)\n cur.executescript(cmds)\n conn.commit()", "def tag(self, tag):\n\n self._tag = tag", "def tag(self, tag):\n\n self._tag = tag", "def tag(self, tag):\n\n self._tag = tag", "def tag(self, tag):\n\n self._tag = tag", "def add_tag():\n\n tag_name = request.form[\"name\"]\n\n if not tag_name:\n flash(\"Please enter tag name\")\n return redirect(\"/tags/new\")\n\n tag = Tag(name=tag_name)\n db.session.add(tag)\n db.session.commit()\n\n return redirect(\"/tags\")", "def _add_tags(self):\n\n if self.version != 'live':\n return\n\n tags = [t.strip() for t in self.tags_text.split(',')]\n tags = list(set(tags))\n\n for tag_name in tags:\n tag_slug = slugify(tag_name)\n if tag_slug:\n try:\n tag = Tag.objects.get(blog=self.blog, slug=tag_slug)\n except Tag.DoesNotExist:\n tag = Tag( blog = self.blog,\n name = tag_name,\n slug = tag_slug)\n\n tag.increment()\n tag.save()\n\n self.tags.add(tag)", "def before_insert(self, obj, st):\n pass", "def add_tag():\n \n return render_template('tags/add_tag.html')", "def addTag(id = 0):\n\tinsertQueries.addTag(request.form)\n\tresults = queries.package(id)\n\treturn render_template('package.html', package=results)", "def create_note(self, text, tag_list):\n self.note.note_text = text\n self.note.save()\n\n for tag in tag_list:\n db_tags = Tags.objects.all() \n found = False\n\n for t in db_tags:\n if t.tag_text == tag:\n t.notes.add(self.note)\n found = True\n\n if found == False: \n new_tag = Tags() \n new_tag.tag_text = tag\n new_tag.save()\n new_tag.notes.add(self.note)\n new_tag.save()", "async def add_tags(tags: List[str], map_name: str, conn: Connection, channel: TextChannel) -> None:\n for tag in tags:\n insert_sql = \"\"\"insert into tags(tag_name, map_id) \n select ?, (select map_id from maps where map_path=?)\n where not exists\n (select * from tags where tag_name = ? and map_id = (select map_id from maps where map_path=?)) \"\"\"\n select(conn, insert_sql, (tag, map_name, tag, map_name))\n await channel.send(f\"Added tags `{' '.join(tags)}` for map {map_name} if it wasn't set\")", "def insert(self, data, language = 'N3') :\n\t\tif language == 'N3' :\n\t\t\tif type(data) == dict :\n\t\t\t\tdata = python_to_n3(data)\n\t\t\t\tlanguage = 'N3'\n\t\t#print 'data',data\n\t\tf = urlopen(self.sparul.baseURI, urlencode({'insert' : data, 'lang' : language}))\n\t\t# self.update_new_uri()", "def set_tag(self, tag):\n self.update(tag=tag)", "def tags_new_page():\n post_ids =[int(n) for n in request.form.getlist(\"posts\")]\n posts = Post.query.filter(Post.id.in_(post_ids)).all()\n new_tag = Tag(name=request.form[\"name\"], posts=posts)\n\n db.session.add(new_tag)\n db.session.commit()\n\n flash(f\"'{new_tag.name}' Tag added. 
\")\n\n return redirect(\"/tags\")", "def add_tag(foodgroup, tag, session):\n if type(tag)==int:\n tag_id = tag\n else:\n tag_id = session.query(Tag.id) \\\n .filter(Tag.name==tag) \\\n .one()[0]\n ndbnos_d = {}\n#NDBNO ids of all nutritions in specific foodgroup \n ndbnos = session.query(LocalNutrition.ndbno, LocalNutrition.desc) \\\n .filter(LocalNutrition.foodgroup==foodgroup)\n for ndbno, desc in ndbnos:\n ndbnos_d[ndbno]=desc\n ndbnos_l = ndbnos_d.keys()\n#NDBNO ids of specific foodgroup that already had tag\n already_added_ndbnos = session.query(TagItem.ndbno) \\\n .filter(TagItem.tag_id==tag_id) \\\n .filter(TagItem.ndbno.in_(ndbnos_l))\n already_added_ndbnos_s = set((x[0] for x in already_added_ndbnos))\n for ndbno, desc in ndbnos_d.items():\n added = ndbno in already_added_ndbnos_s\n print (ndbno, desc, added)\n all_ndbnos_s = set(ndbnos_l)\n to_add_ndbnos = all_ndbnos_s.difference(already_added_ndbnos_s)\n for ndbno in to_add_ndbnos:\n tag_item = TagItem(ndbno=ndbno, tag_id=tag_id)\n session.add(tag_item)", "def create_tag():\n \n name = request.form['tag_name']\n\n if \"name\" in session:\n return redirect(\"/tags\")\n\n else:\n new_tag = Tag(name = name)\n db.session.add(new_tag)\n db.session.commit()\n return redirect(\"/tags\")", "def push_tag(self, tag):\n _tag_entity('task', self.task_id, tag)", "def test_add_tagitem(self):\n record = self.good_record()\n record['tag'] = self.tag\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.images.find_one({'_id': id})\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])", "def insertar(self, cliente):\n self.enfila+=1\n self.fila.append(cliente)", "def insertar(self, cliente):\n self.enfila+=1\n self.fila.append(cliente)", "def add_cash_tag(self, cash_tags):\n for cash_tag in cash_tags:\n if check_data_exist(cash_tag) is True:\n self.cash_tag += cash_tag.text", "def set_tag(self, t) -> None:\n self.tag = t", "def insertarServicio(fila):\n try:\n conexion.cur.execute('insert into servicios(codigoReservaServicio,concepto, precio) values(?,?,?)', fila)\n conexion.conex.commit()\n\n except sqlite3.OperationalError as e:\n print(e)\n conexion.conex.rollback()", "def addArgos(row, tag_id, animal_id, timevalue, gt, bd):\r\n feature_id = 0\r\n feature_type = 'argos'\r\n try:\r\n dev = (tag_id, animal_id, timevalue, feature_type, gt) # instantiate Argos object\r\n argosObj = tables.Argos(*dev, **row) # returns 0 if duplicate\r\n feature_id = dbutil.dbTransact(conn,argosObj.sql_insert(),\r\n argosObj.param_dict())\r\n if feature_id:\r\n transmit_id = addTransmit(feature_id, row, bd)\r\n\r\n except Exception as e:\r\n print 'addArgos Error '+ e.message\r\n conn.rollback()\r\n finally:\r\n dev = None\r\n argosObj = None\r\n conn.commit()\r\n return feature_id", "def insert(conn, table_info, table_data):\n\n sql = ''' INSERT INTO ''' + table_info \n + ''' VALUES(''' + \"?,\" * (len(table_data)-1) + \"?)\"\n cursor = conn.cursor()\n cursor.execute(sql, table_data)\n conn.commit()", "def insert(self, word):\n if not word:\n return\n if word[0] in self.trie:\n cur = self.trie[word[0]]\n else:\n cur = TrieNode(word[0])\n for char in word[1:]:\n if char not in cur.nexts:\n cur.nexts[char] = TrieNode(char)\n cur = cur.nexts[char]\n cur.isTerm = True", "def create_tag(name):\n name = name.strip().lower()\n tag = Tags(name)\n try:\n db_session.add(tag)\n db_session.commit()\n except 
exc.IntegrityError as err:\n db_session.rollback()\n return 'Tag \"%s\" has not been added - already exists: %s.' % (name, err), 'warning', None\n return 'Tag \"%s\" has been added.' % name, 'success', tag", "def insertarhab(fila):\n try:\n conexion.cur.execute('insert into habitacion(numero,tipo,prezo,libre) values(?,?,?,?)', fila)\n conexion.conex.commit()\n except sqlite3.OperationalError as e:\n print(e)\n conexion.conex.rollback()", "def insertarhab(fila):\n try:\n conexion.cur.execute('insert into habitacion(numero,tipo,prezo,libre) values(?,?,?,?)', fila)\n conexion.conex.commit()\n except sqlite3.OperationalError as e:\n print(e)\n conexion.conex.rollback()", "def insert(self, word: str) -> None:\n currnode=self.root\n for ch in word:\n #dic.get(parameter, default value)\n node=currnode.children.get(ch,TrieNode())\n currnode.children[ch]=node\n currnode=node\n \n currnode.iswordend=True", "def add_tag(names, tag):\n for name in names:\n b = Box.query.filter_by(name=name).first()\n b.tags.add(tag)\n db.session.commit()", "def insert_data(self):\n\n pass", "def _insert_op(self, op):", "def add_tag(session, tag_name, user_id=None, username='system_user'):\n session = validate_session(session)\n date_created=datetime.now()\n try:\n add_tag = TagInfo(tag_name, date_created, user_id)\n session.add(add_tag)\n session.commit()\n return(True, \"Tag %s added\" % (tag_name), add_tag)\n except Exception as e:\n session.rollback()\n return(False, \"Tag %s failed to add\" % (tag_name))", "def testTags(self):\n db = beerlogdb.BeerLogDB(self.DB_PATH)\n db.known_tags_list = {\n '0x0': {'name': 'toto', 'glass': 33},\n '0x2': {'name': 'toto', 'glass': 45}\n }\n db.AddEntry('0x0', '')\n db.AddEntry('0x2', '')\n\n with tempfile.NamedTemporaryFile(mode='w+') as temp:\n temp.write(json.dumps({\n '0x0':{'name': 'Kikoo', 'glass': '30'},\n '0x2':{'name': 'name', 'realname': 'realname', 'glass': '45'}}))\n temp.flush()\n db.LoadTagsDB(temp.name)\n l = db.known_tags_list\n self.assertEqual(2, len(l))\n\n self.assertEqual('Kikoo', db.GetNameFromHexID('0x0'))\n self.assertEqual('realname', db.GetNameFromHexID('0x2'))\n\n self.assertEqual(None, db.GetNameFromHexID('0x1'))\n\n self.assertEqual(db.GetCharacterFromHexID('0x0').name, 'Kikoo')\n self.assertEqual(db.GetCharacterFromHexID('0x2').name, 'realname')", "def test_add_remove_withtag(self):\n record = self.good_record()\n # Create a fake record in mongo\n id = self.images.insert(record)\n\n session = self.m.new_session(self.auth, self.system)\n i = self.query.copy()\n status = self.m.add_tag(id, self.system, 'testtag')\n self.assertTrue(status)\n rec = self.m.lookup(session, i)\n self.assertIsNotNone(rec)\n self.assertIn(self.tag, rec['tag'])\n self.assertIn('testtag', rec['tag'])", "def insert(self, word: str) -> None:\r\n nroot=self.root\r\n for i in word:\r\n \r\n # index=ord(i)-ord('a')\r\n if i not in nroot.children:\r\n nroot.children[i]=self.root\r\n nroot=nroot.children[i] \r\n \r\n nroot.endofword=True", "def addLine(\n self, tag: str, type: str, signal: str, \n pid: str, version: int, listId: str):\n query = f\"\"\"\n INSERT INTO lines (tag, type, signal, pid, version, listId)\n VALUES ('{tag}', '{type}', '{signal}', \n '{pid}', {version}, {listId});\n \"\"\"\n return sql.executeQuery(self.connection, query)", "def insert(sql, clue):\n\t# clue is [game, airdate, round, category, value, clue, answer]\n\t# note that at this point, clue[4] is False if round is 3\n\tif \"\\\\\\'\" in clue[6]:\n\t\tclue[6] = clue[6].replace(\"\\\\\\'\", \"'\")\n\tif 
\"\\\\\\\"\" in clue[6]:\n\t\tclue[6] = clue[6].replace(\"\\\\\\\"\", \"\\\"\")\n\tif not sql:\n\t\tprint clue\n\t\treturn\n\tsql.execute(\"INSERT OR IGNORE INTO airdates VALUES(?, ?);\", (clue[0], clue[1], ))\n\tsql.execute(\"INSERT OR IGNORE INTO categories(category) VALUES(?);\", (clue[3], ))\n\tcategory_id = sql.execute(\"SELECT id FROM categories WHERE category = ?;\", (clue[3], )).fetchone()[0]\n\tclue_id = sql.execute(\"INSERT INTO documents(clue, answer) VALUES(?, ?);\", (clue[5], clue[6], )).lastrowid\n\tsql.execute(\"INSERT INTO clues(game, round, value) VALUES(?, ?, ?);\", (clue[0], clue[2], clue[4], ))\n\tsql.execute(\"INSERT INTO classifications VALUES(?, ?)\", (clue_id, category_id, ))", "def addConnection(tagA, tagB): #@NoSelf", "def test_tags_on_article(self):\n self.article.tags.add(self.tag1, self.tag2)\n self.assertEqual('Django', str(self.article.tags.all()[0]))", "def insert(self, product):\n pass", "def insert(self, word: str) -> None:\n temp=self.root\n \n for char in word:\n # if the index corresponding to value of char is None, then this char is not present\n if(not temp.children[ord(char)-ord('a')]):\n temp.children[ord(char)-ord('a')]=TrieNode()\n \n # move to new location in trie\n temp=temp.children[ord(char)-ord('a')]\n \n #after the word has been traversed , mark the last trie node as endofword\n temp.endOfWord=True", "def insert(self, key, value):\n if key in self.map:\n return\n\n try:\n tag_key = TagKey(key)\n tag_val = TagValue(value)\n self.map[tag_key] = tag_val\n except ValueError:\n raise", "def tags():" ]
[ "0.6976025", "0.6736013", "0.6654346", "0.62465644", "0.61812353", "0.61108446", "0.6020931", "0.60011894", "0.5945555", "0.5943481", "0.59106773", "0.5909345", "0.5869094", "0.5852946", "0.5828006", "0.5823748", "0.58234936", "0.5821268", "0.58135855", "0.57451326", "0.5730849", "0.5701495", "0.5692834", "0.5686897", "0.56754094", "0.5675281", "0.5654106", "0.5645849", "0.5644535", "0.56394947", "0.5613889", "0.55908895", "0.55737776", "0.5552322", "0.55450785", "0.554065", "0.55323285", "0.55102867", "0.55101824", "0.5509351", "0.55022", "0.5477222", "0.54636717", "0.5455174", "0.5444501", "0.543065", "0.5425928", "0.54207265", "0.54160434", "0.5403266", "0.5396617", "0.53877264", "0.5378791", "0.5376184", "0.5355833", "0.53512305", "0.53512305", "0.53512305", "0.53512305", "0.5343659", "0.53269047", "0.5321371", "0.5309134", "0.52970207", "0.52831185", "0.52719176", "0.52672684", "0.52629477", "0.5261256", "0.52586", "0.5258224", "0.5257992", "0.52521807", "0.5251291", "0.5251291", "0.5248502", "0.52442825", "0.5235386", "0.5233946", "0.5232809", "0.5230303", "0.5227923", "0.52277243", "0.52277243", "0.5221891", "0.5209602", "0.5198038", "0.5191174", "0.5190053", "0.5186775", "0.5186482", "0.5185301", "0.51839674", "0.51807845", "0.517265", "0.51722217", "0.5168531", "0.5167384", "0.51566744", "0.51466036" ]
0.5345447
59
Replaces the occurrence(s) of tag in the file with nstr.
def rep(self,tag,nstr):
    tmp = []
    for line in self.content:
        if tag in line:
            tmp.append(line.replace(tag,nstr))
        else:
            tmp.append(line)
    self.content = tmp
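A standalone sketch of the replacement behaviour shown above. The original method stores the result back on self.content, while this version simply returns it; names are illustrative.

def replace_tag(content, tag, nstr):
    # Replace every occurrence of tag on any line that contains it.
    return [line.replace(tag, nstr) if tag in line else line for line in content]

lines = ['host = {HOST}\n', 'port = 8080\n']
print(replace_tag(lines, '{HOST}', 'localhost'))
# ['host = localhost\n', 'port = 8080\n']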
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ler_arquivo_xml(self, diretorio):\r\n with open(diretorio, 'r') as fxml:\r\n\t strfx = fxml.readlines()\r\n\t string = \"\".join(strfx).replace(\"&\",\" e \")\r\n return string", "def archivoXl(archivo):\r\n return ow(archivo)", "def SV_tag_length(tag_file, outPrefix):\r\n outdir = \"/\".join(outPrefix.split(\"/\")[:-1])\r\n outdir = outdir + \"/\"\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n ins_h = open(outPrefix + \"_INS_DEL.txt\", \"w\")\r\n inv_h = open(outPrefix + \"_INV_DUP.txt\", \"w\")\r\n ins_h.write(\"Tag\\tSVType\\tSVLength\\n\")\r\n inv_h.write(\"Tag\\tSVType\\tSVLength\\n\")\r\n \r\n tag_h = open(tag_file, \"r\")\r\n header = tag_h.readline().strip()\r\n for line in tag_h:\r\n lines = line.strip().split(\"\\t\")\r\n tag = lines[0]\r\n tags = tag.split(\"-\")\r\n length = tags[2]\r\n SVType = tags[3]\r\n if SVType == \"INS\" or SVType == \"DEL\":\r\n ins_h.write(\"%s\\t%s\\t%s\\n\" % (tag, SVType, length))\r\n elif SVType == \"INV\" or SVType == \"DUP\":\r\n inv_h.write(\"%s\\t%s\\t%s\\n\" % (tag, SVType, length))\r\n else:\r\n print(\"Please ckeck whether INS, DEL, INV or DUP is in description %s.\" % tag)\r\n tag_h.close()\r\n inv_h.close()\r\n ins_h.close()", "def make_tag_data_raw_fast(mdp,filename):\n #\n fin = open(filename,'r')\n iter = 0\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"comb_path\":\n update_params(mdp,lsp)\n if not mdp.flag_out_open: ## -- try to open output file\n try:\n if mdp.flag_overwrite == \"True\": ## check string value!\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ## -- try open output file\n for file in glob.glob(mdp.input_path):\n # get sign which corrects for boundary condition\n tvals = file.split('/')[-1].split('_')[3].split('t')\n try:\n ## flip sign if requested\n bcsign = ((int(tvals[1])+int(tvals[2])) != (int(tvals[1])+int(tvals[2])) % mdp.corr_len)\n except IndexError:\n ## 2-point function\n bcsign = False\n try:\n # open correlator file\n mdp.corr_file = open(file,'r')\n except IOError:\n print \"Could not open file \",file\n continue\n ## -- get tag\n ## baryons:\n #mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_r'+file.split('/')[-1].split('_')[4][-1]\n ## with time source tag\n #mdp.tag = file.split('/')[-1].split('_')[3][:3]\\\n # +'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n # +file.split('/')[-1].split('_')[4][3:]\n ## no time source tag\n mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n +file.split('/')[-1].split('_')[4][3:]\n #print file,',',mdp.tag\n iter+=1\n ##endif ! 
flag_out_open\n\n #save_data_fast(mdp)\n save_data_fast_bc(mdp,bcsign)\n mdp.corr_file.close()\n if iter%400 == 0:\n print \"file\",iter\n max_iter = None\n if not(max_iter is None) and iter==max_iter:\n print \"reached max file iterations, ending loop...\"\n break\n ## end comb_path\n pass\n\n elif lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def save_to_fileobj(self, fileobj):\n writetags(fileobj, self.__dxftags__(), self.ENCODING)", "def tokenize_tag_with_unks(self, path, fname):\n assert os.path.exists(path)\n fpath = os.path.join(path, fname)\n with open(fpath, 'r') as f:\n lines = f.read().split('\\n')\n total_num = len(lines)\n \n # Tokenize file content\n tag_ids = torch.zeros((total_num, self.seq_len), dtype=torch.long)\n for i, line in enumerate(lines):\n if line.strip() != \"\":\n tags = line.strip().split()\n tag_ids[i, 0] = self.tag2idx['<SOS>']\n for j, tag in enumerate(tags[:self.seq_len-1]):\n if tag not in self.tag2idx:\n tag_ids[i, j+1] = self.tag2idx[\"<UNK>\"]\n else:\n tag_ids[i, j+1] = self.tag2idx[tag]\n if j+1 < self.seq_len-1:\n tag_ids[i, j+2] = self.tag2idx['<EOS>']\n return tag_ids", "def geneA(nombreA,listaPGA): #Esta sección fue hecha por Ángel\n with open(nombreA + \".txt\", \"w\") as archivo:\n archivo.writelines(listaPGA)", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la 
palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def tag(referencefile):\n dirpath = path.abspath(referencefile)\n\n if path.isdir(dirpath):\n dircontents = listdir(dirpath)\n else:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n\n while not 'tag' in dircontents:\n dirpath = path.split(dirpath)[0]\n dircontents = listdir(dirpath)\n if len(dircontents) == 0 or path.split(dirpath)[1] == 'chemistry':\n print(\"tag file not found\")\n return None\n\n return path.join(dirpath, 'tag')", "def ins(self,tag,nstr,jumpline=True):\n tmp = []\n strj = '\\n' if jumpline else ''\n for line in self.content:\n tmp.append(line)\n if tag in line:\n tmp.append(nstr + strj)\n self.content = tmp", "def quran_words_frequences_data(fileName):\n\n # Computing unique words\n unique_words = get_unique_words()\n comma_separated_unique_words = ''\n for word in unique_words:\n comma_separated_unique_words += word + ','\n\n # Removing the extra commas\n comma_separated_unique_words = comma_separated_unique_words.strip(',')\n\n\n\n # * Creating quran_words_frequences_data -- the root tag\n root = Element('quran_words_frequences')\n root.set('unique_words', comma_separated_unique_words)\n\n 
# * Add root to the tree\n tree = ElementTree(root)\n\n\n for suraNumber in range(1, 114 +1):\n\n sura = quran.get_sura(suraNumber)\n\n # * Creating sura Tag\n suraTag = Element('sura')\n\n # * set number attribute\n suraTag.set('number', str(suraNumber))\n\n # * set sura unique words\n # ??? update get_unique_words\n # suraTag.set('sura_unique_words', suraUniquewords)\n\n ayaCounter = 1\n for aya in sura:\n\n # Create aya Tag\n ayaTag = Element('aya')\n ayaTag.set('number', str(ayaCounter))\n\n # * Computes the words frequency for aya\n ayaWordsDict = get_frequency(aya)\n\n words_comma_separated = ''\n occurrence_comma_separated = ''\n\n for word in ayaWordsDict:\n words_comma_separated += word + ','\n occurrence_comma_separated += str(ayaWordsDict[word]) + ','\n\n # * The same order\n words_comma_separated = words_comma_separated.strip(',')\n occurrence_comma_separated = occurrence_comma_separated.strip(',')\n\n # * Add words & frequencies attributes\n ayaTag.set('unique_words', words_comma_separated)\n ayaTag.set('unique_words_frequencies', occurrence_comma_separated)\n\n\n # * Add aya tag to sura tag\n suraTag.append(ayaTag)\n\n ayaCounter += 1\n\n # * add suraTag to the root\n root.append(suraTag)\n\n\n # print(prettify(root))\n\n file = open(fileName, 'w')\n file.write(prettify(root))\n file.close()", "def okoo_merge_label(file_name):\n labels_dic = {}\n label = 0\n with open(\"label_doc_3\", encoding='utf-8') as f:\n for line in f:\n if len(line) < 2:\n continue\n for key in re.findall('(\\d+)', line):\n labels_dic[''.join(key)] = label\n label += 1\n cur_true_label = label + 1\n with open(file_name, encoding='utf-8') as f1:\n texts = []\n data = json.load(f1)['all']\n for text_ in data:\n label = text_['label']\n if label in labels_dic:\n text_['merged_label'] = labels_dic[label]\n else:\n print(text_)\n text_['merged_label'] = cur_true_label\n # text_['text'] = ' '.join([c[0] for c in thu0.fast_cut(text_['text'])])\n texts.append(text_)\n\n with open('okoo-merged-3-label.json', 'w', encoding='utf-8') as f:\n json.dump(texts, f, ensure_ascii=False, indent=4, separators=(',', ': '))", "def ner_nltk(filepath):\n\n out = \"\"\n\n with codecs.open(filepath,'r','utf-8') as current_file:\n\n text = current_file.readlines()\n\n with codecs.open(filepath+\".ner\",'w','utf-8') as outfile:\n\n for line in text:\n\n tokenized = line.split()\n tagged = pos_tag(tokenized)\n ne = ne_chunk(tagged)\n\n for index,token in enumerate(ne):\n if type(token) != tuple:\n outfile.write(' '.join([tok[0]+\"|\"+token.label() for tok in token])+' ')\n else:\n outfile.write(token[0]+' ')\n outfile.write('\\n')", "def extract_tags_to_file(data, filename):\n data.sort(key=lambda tag: tag[1], reverse=True)\n with open(filename, 'w') as f:\n # first four lines for metadata\n f.write(filename + '\\n')\n f.write('tags: %d\\n\\n\\n' % len(data))\n for tag in data:\n f.write('%s\\t\\t\\t%d\\n' % (tag[0], tag[1]))", "def tags():", "def tag_file_process(self, multiple_files):\n # the path is now becoming a string since it goes through the UI\n # text entry box, not a list or tuple any more, so we turn it to a\n # list of paths\n file_list = multiple_files.split(' ')\n # the main dictionary to store all tags\n tag_dict = dict()\n rows = []\n # now for all the tag file under the folder(root directory), we load\n # the data into the dictionary\n if len(file_list) == 0:\n tk.messagebox.showwarning('warning', 'no files chosen')\n else:\n for file_path in file_list:\n if os.path.isfile(file_path):\n with open(file_path, 'r', 
encoding='utf-8') as \\\n current_tag_file:\n # initialize the dictionary and the inner dictionary\n reader = csv.reader(current_tag_file)\n for row in reader:\n # the encode, decode is use to resolve the \"\\ueffa\"\n # BOM-utf8 problem\n row[0] = row[0].encode('utf-8').decode('utf-8-sig')\n tag_dict[row[0]] = dict()\n rows.append(row)\n # store the tag into the dictionary\n for row in rows:\n # the 1st column is the main key(mob fact col name)\n # the 2nd column is the tag id\n # the 3rd column is the tag with real meaning\n tag_dict[row[0]][row[1]] = row[2]\n\n else:\n tk.messagebox.showinfo('warning', 'can not obtain: ' +\n file_path)\n return tag_dict", "def loeschen(self):\r\n loeschen=self.REQUEST['loeschen']\r\n tit=''\r\n i=0\r\n j=0\r\n index=[]\r\n cursor=[]\r\n for x in self.objectValues('Image'):\r\n if str(x.id())[0:6] not in index:\r\n index.append(str(x.id())[0:6]) \r\n cursor.append([str(x.id())[0:6],str(x.title),[str(x.id())]])\r\n if str(x.id())[0:6]==loeschen:\r\n tit=str(x.title)\r\n j=i\r\n i=i+1\r\n else:\r\n cursor[-1][2].append(str(x.id()))\r\n #for val in cursor[j][2]:\r\n #self._delOb(self, id=str(val))\r\n #delet=delet+str(val)+' '\r\n self.manage_delObjects(ids=cursor[j][2])\r\n return tit+' gel&ouml;scht !'", "def tagger():", "def cleanup(segment):\n cnt = ''.join(segment.file_content)\n index = cnt.find('\\\\annotate')\n if index < 0:\n return\n while index >= 0:\n cnt, new_ind = parse_annotation(cnt, index)\n index = cnt.find('\\\\annotate', new_ind)\n f = codecs.open(segment.filename, 'w', 'utf-8')\n f.write(cnt)\n f.close()\n info('Updated: {} {}'.format(segment.voice_name, segment.name))", "def write_file(self, file_path, acc, dict_tags):\n logging.info('Escrevendo arquivo em {0}'.format(file_path))\n file_write = open(file_path, \"w\")\n file_write.write(\"Taxa de acerto geral: {0:.2f}%\\n\".format(np.mean(acc)*100))\n for key in dict_tags.keys():\n if dict_tags[key]['right'] > 0:\n file_write.write(\"Taxas de acerto para a classe '{0}': {1:.2f}% Total da classe '{0}': {2:.2f}%\\n\".format(key, \n (dict_tags[key]['pred']/dict_tags[key]['right'])*100, \n (dict_tags[key]['right']/dict_tags[key]['pres'])*100))\n else:\n file_write.write(\"Taxas de acerto para a classe '{0}': Nao presente no corpus de teste\\n\".format(key))\n\n file_write.close()", "def archivoXlFormateado(archivo):\r\n return ow(archivo, formatting_info=True)", "def replace_tag(tag, value, file):\r\n with open(file, \"r\") as origin:\r\n with open(file+\".replaced\", \"w\") as dest:\r\n dest.write(origin.read().replace(tag, str(value)))\r\n return file+\".replaced\"", "def escribir_indir(self, FILESYS, id,name_file=\"Xx.xXx.xXx.xXx.\",\n size_file=\"\",inicluster=\"\",cdate=\"\",mdate=\"\",no_use=\"\"):\n byte = 512\n tamanno_indir = 64\n id = int(id)\n try:\n FILESYS[byte+(tamanno_indir*id):byte+(tamanno_indir*id)+15] =\\\n ((\" \"*(15-len(str(name_file))))+str(name_file)).encode('ascii')\n except:\n print(\"Nombre no valido\")\n return False\n FILESYS[byte+(tamanno_indir*id)+16:byte+(tamanno_indir*id)+24] =\\\n (\"0\"*(8-len(str(size_file)))+str(size_file)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+25:byte+(tamanno_indir*id)+30] =\\\n (\"0\"*(5-len(str(inicluster)))+str(inicluster)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+31:byte+(tamanno_indir*id)+45] =\\\n (\"0\"*(14 - len(str(cdate)))+str(cdate)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+46:byte+(tamanno_indir*id)+60] =\\\n (\"0\"*(14 - len(str(mdate)))+str(mdate)).encode('ascii')\n 
FILESYS[byte+(tamanno_indir*id)+61:byte+(tamanno_indir*id)+64] =\\\n (\"\\x00\"*(3 - len(str(no_use)))+str(no_use)).encode('ascii')\n return True", "def getArchivoVotacion():", "def carrega_endereco_tag_daruma(self, tag):\r\n daruma_dict = {\"LOCALARQUIVOS\":['START','LocalArquivos'],\r\n \"LOCALARQUIVOSRELATORIOS\":['START','LocalArquivosRelatorios'],\r\n \"LOGTAMMAXMB\":['START','LogTamMaxMB'],\r\n\t\t \"MODOOBSERVER\":['START','ModoObserver'],\r\n \"PATHBIBLIOTECASAUXILIARES\":['START','PathBibliotecasAuxiliares'],\r\n \"PRODUTO\":['START','Produto'],\r\n \"THREADAOINICIAR\":['START','ThreadAoIniciar'],\r\n \"TIPOREGISTRO\":['START','TipoRegistro'],\r\n \"TERMICA\":['DUAL','Termica'],\r\n \"DUALTAMANHOBOBINA\":['DUAL','TamanhoBobina'],\r\n \"DUALPORTACOMUNICACAO\":['DUAL','PortaComunicacao'],\r\n \"DUALVELOCIDADE\":['DUAL','Velocidade'],\r\n \"ROTA1\":['DUAL','Rota1'],\r\n \"ROTA2\":['DUAL','Rota2'],\r\n \"ROTA3\":['DUAL','Rota3'],\r\n \"ROTA4\":['DUAL','Rota4'],\r\n \"ROTA5\":['DUAL','Rota5'],\r\n \"ATIVAROTA\":['DUAL','AtivaRota'],\r\n \"AJUSTARDATAHORA\":['NFCE','AjustarDataHora'],\r\n \"AVISOCONTINGENCIA\":['NFCE','AvisoContingencia'],\r\n \"AUDITORIA\":['NFCE','Auditoria'],\r\n \"ENCONTRARIMPRESSORA\":['NFCE','EncontrarImpressora'],\r\n\t\t \"PATHARQUIVOSCTGOFFLINE\":['NFCE','PathArquivosCtgOffline'],\r\n \"MARCAIMPRESSORA\":['NFCE','IMPRESSORA\\MarcaImpressora'],\r\n \"NFCETAMANHOBOBINA\":['NFCE','IMPRESSORA\\TamanhoBobina'], \r\n \"NFCEPORTACOMUNICACAO\":['NFCE','IMPRESSORA\\PortaComunicacao'], \r\n \"NFCEVELOCIDADE\":['NFCE','IMPRESSORA\\Velocidade']\r\n }\r\n #\"ENDERECOSERVIDOR\":['NFCE','EnderecoServidor'],\r\n\r\n #if tag.upper() not in [x.upper() for x in daruma_dict.keys()]:\r\n if tag.upper() not in daruma_dict:\r\n raise Exception(\"-40: Tag XML DarumaFramework nao encontrada.\")\r\n return daruma_dict[tag.upper()]", "def file(self):\n\n dlos_filename = super(DlosPhotoz, self).file()\n\n photoz_str = 'DLOS_photoz_'\n \n file_name = photoz_str.join( \n dlos_filename.split('DLOS_')\n ) \n\n return file_name", "def createNew(f1):\n file1=open(f1)\n file2=open(r\"C:\\Users\\Devansh\\Desktop\\Projects\\img\\test.txt\",\"w+\")\n count=0\n text=\"\"\n chars=list(file1.read())\n prevChar=chars[0]\n for i in range(1,len(chars)):\n char=chars[i]\n #print(char,prevChar)\n if(char==prevChar):\n count+=1\n elif(char!=prevChar):\n if(count==1):\n text=text+char\n else:\n text=text+str(count)+char\n prevChar=char\n count=1\n file2.write(text)", "def convert(src, dst):\n with open(dst, 'w', encoding = 'utf-8') as myFile:\n records = read(src)\n for tag in sorted(records.keys()):\n myFile.write('%s %s\\n' %(tag, records[tag]))", "def filtraFileDiAnn(fileInput, geneNames):\n\n\t#---------------------\n\t# Creazione di una lista dove ogni elemento e' una riga del file \n\t# Ogni elem e' una lista di informazioni divise per colonne \n\t#\n\t# formato di un elemento di lines:\n\t#\n\t#\tPOSIZIONE \t\t\tCONTENUTO\n\t#\t\t0\t\t\t\t\tcromosoma\n\t#\t\t3\t\t\t\t\tstart\n\t#\t\t4\t\t\t\t\tend\n\t#\t\t6\t\t\t\t\tstrand\n\t#\t\t8\t\t\t\t\tgene_id\n\t#\t\t9\t\t\t\t\ttranscript_id\n\t#\t\t10\t\t\t\t\texon_number\n\t#\t\t11\t\t\t\t\tgene_name\n\t#\t\t12\t\t\t\t\ttranscript_name\t\n\t#\n\n\n\tstringa \t= '\\texon\\t'\n\tlines \t\t= []\n\tdictGeneChr = {}\n\t\n\t# Indici per il file di annotazione\n\t#\n\tidx_cromosoma = 0\n\tidx_geneName = 11\n\tidx_start = 3\n\tidx_end = 4\n\t\n\tfor x in open(fileInput):\n\t\triga = x.strip(';\\n').replace('; ','\\t').split('\\t')\n\n\t\tif not 
geneNames.has_key(riga[idx_geneName]):\n\t\t\tcontinue\n\t\t\t\t\n\t\t# Creazione del dizionario dei gene_name per ogni cromosoma\n\t\t#\n\t\tkey_geneChr = riga[idx_geneName] + '\\t' + riga[idx_cromosoma]\n\t\tif not dictGeneChr.has_key(key_geneChr):\n\t\t\tdictGeneChr[key_geneChr] = [riga[idx_start], riga[idx_end]]\n\t\telse:\n\t\t\t\n\t\t\t# Si aggiona il valore dello start del gene se si trova un \n\t\t\t# valore piu' piccolo\n\t\t\t#\n\t\t\tif int(dictGeneChr[key_geneChr][0]) > int(riga[idx_start]):\n\t\t\t\tdictGeneChr[key_geneChr][0] = riga[idx_start]\n\t\t\t\t\n\t\t\t# Si aggiorna il valore dell'end del gene se si trova un\n\t\t\t# valore piu' grande\n\t\t\t#\n\t\t\tif int(dictGeneChr[key_geneChr][1]) < int(riga[idx_end]):\t\n\t\t\t\tdictGeneChr[key_geneChr][1] = riga[idx_end]\n\t\t\n\t\t# Si filtra il file considerando solamente le regioni di tipo \"exon\"\n\t\t#\n\t\tif stringa in x:\n\t\t\tlines.append(riga)\n\n\treturn [lines, dictGeneChr]", "def lesFraFil(self, filnavn): \r\n self._sanger.clear()\r\n \r\n # Les en fil med musikk\r\n innfil = open(filnavn, mode='r')\r\n \r\n \r\n for i, linje in enumerate(innfil):\r\n biter = linje.strip().split(';')\r\n # sang = \"sang\" + str(i+1)\r\n # Opprett et objekt for hver sang\r\n # print(sang)\r\n sang = Sang(biter[0], biter[1])\r\n \r\n # Legg objektet(sangen) til i spillelisten\r\n self._sanger.append(sang)\r\n \r\n # Lukk filen\r\n innfil.close()\r\n # print(self._sanger)\r\n # print(allMusikk) NO\r\n print()", "def identify_file(self, file):", "def export_gexf_termos(rotulos,similaridades,nome_arquivo,threshold,excluir_negativos):\n\n tbl = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))\n\n arquivo = codecs.open(nome_arquivo + \".gexf\",\"w\",\"utf-8\")\n arquivo.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n arquivo.write('<gexf xmlns=\"http://www.gexf.net/1.2draft\" version=\"1.2\">\\n')\n arquivo.write('\\t<graph mode=\"static\" defaultedgetype=\"undirected\">\\n')\n arquivo.write('\\t\\t\\t<nodes>\\n')\n arquivo.flush()\n\n cont=0\n cont2=0;\n for key in rotulos:\n arquivo.write(u\"\\t\\t\\t\\t<node id=\\\"%d\\\" label=\\\"%s\\\"/>\\n\" % (cont2,key))\n cont = cont+1\n cont2 = cont2+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</nodes>\\n')\n arquivo.write('\\t\\t\\t<edges>\\n')\n arquivo.flush()\n\n cont=0\n for similaridade in similaridades:\n if(excluir_negativos and (similaridade[2] < 0)):\n continue\n\n if abs(similaridade[2]) >= threshold:\n label = ' - '.join((similaridade[0],similaridade[1]))\n arquivo.write(\"\\t\\t\\t\\t<edge source=\\\"%d\\\" target=\\\"%d\\\" weight=\\\"%f\\\" label=\\\"%s\\\" />\\n\" % (rotulos.index(similaridade[0]),rotulos.index(similaridade[1]),similaridade[2],label))\n\n cont = cont+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</edges>\\n')\n arquivo.write('\\t</graph>\\n')\n arquivo.write('</gexf>')\n arquivo.close() # you can omit in most cases as the destructor will call it", "async def import_tags(f):\n conn = await db.get_db()\n cur = await conn.execute(\"select id, tag from poi\")\n poi_tags = {row[0]: row[1] async for row in cur}\n new_tags = {}\n for row in csv.DictReader(f):\n if not row['id'].isdecimal():\n continue\n poi_id = int(row['id'])\n tag = row['tag'].strip()\n if not tag:\n continue\n if poi_id not in poi_tags:\n continue\n if tag != poi_tags[poi_id]:\n await conn.execute(\"update poi set tag = ? 
where id = ?\", (tag, poi_id))\n if tag not in config.TAGS['tags']:\n if tag not in new_tags or not new_tags[tag]:\n new_tags[tag] = row['type'].strip()\n await conn.commit()\n\n if not new_tags:\n return None\n\n outfile = StringIO()\n print('tags:', file=outfile)\n for k, v in new_tags.items():\n print(f' {k}: [{v}]', file=outfile)\n outfile.seek(0)\n return outfile", "def get_formatted_file_tags(self):\n # type: () -> List[str]\n return sorted(\"-\".join(tag) for tag in self.file_tags)", "def to_filetag(self) -> str:\n return self.strftime(f\"{self.FormatCode.YEAR.WITH_CENTURY}{self.FormatCode.MONTH.NUM}{self.FormatCode.DAY.NUM}\")", "def extract_and_tag_test():\n test_untagged_path = os.getcwd() + \"/data/test/test_untagged/\"\n test_untagged_directory = os.fsencode(test_untagged_path)\n\n print(\"Tagging text. Please wait...\")\n for file in os.listdir(test_untagged_directory):\n filename = os.fsdecode(file)\n try:\n if filename.endswith(\".txt\"):\n text = entity_process.read_data(test_untagged_path, file)\n text = text.lower()\n header,body = entity_process.split_text(text)\n header_array = header.splitlines()\n\n\n start_time, end_time = entity_process.extract_time(header)\n location = entity_process.extract_location(header_array, body)\n speaker = entity_process.extract_speaker(header_array, body)\n\n entity_tagger.tag_all(filename, text, start_time, end_time, location, speaker)\n except Exception as e:\n raise e\n return \"No files found here!\"\n print(\"Tagging complete! Text saved to\" + os.getcwd() + \"/out\")", "def ouvrir_fichier(nom_du_fichier) :\r\n with open(nom_du_fichier,\"r\") as fichier_original : #on ouvre le fichier en mode r => read\r\n texte=\"\".join(fichier_original.read())\r\n if texte==\"\" : #si texte = RIEN alors FALSE\r\n return(False)\r\n return(texte)", "def sent_tokenize_tag_with_unks(self, path, fname):\n assert os.path.exists(path)\n fpath = os.path.join(path, fname)\n with open(fpath, 'r') as f:\n lines = f.read().split('\\n')\n \n # Tokenize file content\n all_tags = []\n for i, line in enumerate(lines):\n if line.strip() != \"\":\n tags = line.strip().split()\n tag_ids = torch.LongTensor(len(tags)+2)\n tag_ids[0] = self.tag2idx['<SOS>']\n for j, tag in enumerate(tags):\n if tag not in self.tag2idx:\n tag_ids[j+1] = self.tag2idx[\"<UNK>\"]\n else:\n tag_ids[j+1] = self.tag2idx[tag]\n tag_ids[j+2] = self.tag2idx['<EOS>']\n all_tags.append(tag_ids)\n return all_tags", "def URI_to_FILE(self,Nom,uri):\n tab=[]\n certs=self.MaBdd.get_orphan_by_obj(Nom)\n if certs:\n for cert in certs:\n tab.append(cert[0]+' : '+cert[1])\n tab.append('Les CVE')\n allcpe=self.MaBdd.get_tab_all_cpe_uri(uri)\n \n if allcpe:\n title=['CRC','CVE','Conf','OPE','Vuln','CPE','Start_excl','Start_incl','End_excl','End_incl','New']\n lgmax=self.Get_max_lg(allcpe,title)\n #0 c'est le CRC \n tab.append(\"|\".join([f\"{title[x]:{lgmax[x]}}\" for x in range(1,10)]))\n delta=lgmax[1]+lgmax[2]+lgmax[3] + 3\n test=\"test de repetition\"\n for cpe in allcpe:\n testlg=cpe[1]+'_'+str(cpe[2])+'_'+cpe[3]\n if test==testlg:\n mini=\"|\".join([f\"{cpe[x]:{lgmax[x]}}\" for x in range(4,10)])\n tab.append(f\"{' ':{delta}}{mini}\")\n else:\n tab.append(\"|\".join([f\"{cpe[x]:{lgmax[x]}}\" for x in range(1,10)]))\n test=cpe[1]+'_'+str(cpe[2])+'_'+cpe[3]\n file=file=open(f\"mogs/{Nom}.txt\",'w',encoding='utf-8')\n file.writelines('\\n'.join(tab))\n file.close()", "def renewFile(filename):\n\n\tfileRepo = repertoire + filename + extension # Position du fichier\n\n\t# Ouvre en ecriture et l'ecrase\n\t# 
La methode with ferme le fichier automatiquement\n\twith open(fileRepo, \"w\") as robFile:\n\t\trobFile.write(filename + \"\\n\") # Ecrit le nom du fichier au debut", "def add2_auto_skip(self):\n templateid = int(self.templatelist.curselection()[0])\n template = self.templatelist.get(templateid)\n with codecs.open(archivo, 'a', 'utf-8') as f:\n f.write('{{' + template)\n self.skip_file()", "def contador(filepath: str = sys.argv[1], numero: int = 100):\n \n # extraemos el texto\n with open(filepath, \"r\", encoding = \"utf-8\") as file:\n texto = file.read()\n \n # creamos el documento tokenizado con spacy\n doc = nlp(texto)\n \n # definimos parametros \n etiquetas_de_ruido = [\"PROPN\", # nombre propio\n \"SPACE\", # espacio\n \"PUNCT\", # punctuación\n \"CONJ\", # conjugación\n \"AUX\", # auxiliar\n \"ADP\", # adposición (preposición ó posposición)\n \"ADV\", # adverbio\n \"DET\", # determinante\n \"INTJ\", # interjección\n \"SCONJ\", # conjunción subordinada\n \"PRON\", # pronombre\n \"X\", # otro\n ] \n minimo_de_caracteres = 2\n\n # \n def esRuido(token):\n \"\"\"\n Esta función define si una palabra (o token) es ruido o no.\n \"\"\"\n es_ruido = False\n if token.pos_ in etiquetas_de_ruido:\n es_ruido = True \n elif token.is_stop == True:\n es_ruido = True\n elif len(token.string) <= minimo_de_caracteres:\n es_ruido = True\n return es_ruido \n def limpiador(token, minuscula = True):\n if minuscula:\n token = token.lower()\n return token.strip()\n\n # contador\n cuenta_limpia = [limpiador(palabra.lemma_) for palabra in doc if not esRuido(palabra)]\n\n top_100 = Counter(cuenta_limpia).most_common(numero)\n \n # salvar el archivo limpio \n path, filename = os.path.split(filepath)\n parent_dir, data_dir = os.path.split(path)\n filename, extension = os.path.splitext(filename)\n if \"-\" in filename:\n filename = filename.split(\"-\")[0]\n filepath_out = os.path.join(parent_dir, \"processed\", f\"{filename}-top{numero}.csv\")\n\n with open(filepath_out, \"w\", encoding = \"utf-8\",) as file:\n file.write(\"palabra,cuenta\\n\")\n for i in range(numero):\n file.write(f\"{top_100[i][0]},{top_100[i][1]}\\n\")\n \n return filepath_out, filename", "def putfilenameontop(idf, lines):\n openfile = '<%s>%s</%s>' % ('h4', idf.idfname, 'h4')\n lines = [openfile, '<hr>'] + lines\n return lines", "def tag_mapping(data_path, data_type):\n with open(data_path+data_type+\"_labels.txt\", \"r\") as file1:\n tags = [line.split(\" \")[:-1] for line in file1.readlines()]\n dico = create_dico(tags)\n dico[model.START_TAG] = -1\n dico[model.STOP_TAG] = -2\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag", "def convert(self, tag=\"Data\", delimiter=\",\", noheader=False,\n limit=-1, buffer_size=1000):\n\n\n file_ctr = 0\n item_ctr = 0\n for dirName, subdirList, fileList in os.walk(self.input_directory):\n print('Found directory: %s' % dirName)\n for fname in fileList:\n print('\\t%s' % fname)\n # open the xml file for iteration\n if not fname.endswith(\".xml\"):\n continue\n #pdb.set_trace()\n \n input_file = dirName + \"/\" + fname\n self.context = ETree.iterparse(input_file, events=(\"start\", \"end\"))\n\n # iterate through the xml\n items = [{}]\n\n depth = 0\n min_depth = 0\n row_depth = -1\n n = 0\n for event, elem in self.context:\n if event == \"start\":\n depth += 1\n continue\n else:\n depth -= 1\n if depth < min_depth:\n min_depth = depth\n\n if depth < row_depth and items:\n if noheader:\n noheader = False\n 
else:\n # new line\n self.output_buffer.append(items)\n items = []\n # flush buffer to disk\n if len(self.output_buffer) > buffer_size:\n self._write_buffer(delimiter)\n\n plain_tag = elem.tag\n last_delim = max(elem.tag.rfind('}'), elem.tag.rfind(':'))\n if 0 < last_delim < len(elem.tag) - 1:\n plain_tag = elem.tag[last_delim + 1:]\n if tag == plain_tag:\n if n == 0:\n min_depth = depth\n elif n == 1:\n row_depth = min_depth\n n += 1\n if 0 < limit < n:\n break\n elem_name = elem.get(\"name\")\n if elem_name in self.output_dict[0].keys():\n if elem_name == 'SamS.ArchivedURL':\n if hash(elem.text) in self.item_titles.keys() and self.item_titles[hash(elem.text)] == elem.text:\n #item is repetative\n self.output_dict[item_ctr]={}\n #item_ctr-=1\n break\n else:\n self.item_titles[hash(elem.text)] = elem.text\n self.output_dict[item_ctr][elem_name]= elem.text and elem.text.encode('utf8') or ''\n\n #if (len(self.output_dict[item_ctr]) > 0 ) :\n if ('SamS.ArchivedURL' in self.output_dict[item_ctr]):\n item_ctr+=1\n self.output_dict.append({})\n else:\n self.output_dict[item_ctr] = {}\n \n file_ctr+=1 #next row in the dictionary array\n print \"processing file no \", file_ctr, \" item no\", item_ctr\n\n #pdb.set_trace()\n self._write_buffer(delimiter) # write rest of the buffer to file\n\n return n", "def tag_counts (count_file):\r\n tagcounts = defaultdict(int)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split()\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0])\r\n tag = fields[2]\r\n tagcounts[tag] += count \r\n f.close() \r\n return tagcounts", "def make_iob(txt, ents, etypes):\r\n index = 0\r\n for i in ents:\r\n start = txt.index(i, index) #get the start of the entity\r\n tmp1, tmp2 = txt[:start], txt[start:]\r\n tmp1 += \" eeeeeeeeeeeeeeeeeeee \"\r\n txt = ' '.join([tmp1, tmp2])\r\n index = start + len(i) + len(\" eeeeeeeeeeeeeeeeeeee \")\r\n \r\n line_tokens = word_tokenize(txt)#tokenize the text\r\n \r\n #get the starting positions of the entities\r\n starts = []\r\n try: #in order to handle the last case where list.index doesnt finds anything\r\n while line_tokens.index(\"eeeeeeeeeeeeeeeeeeee\") > -1:\r\n tmp = line_tokens.index(\"eeeeeeeeeeeeeeeeeeee\")\r\n starts.append(tmp)\r\n del line_tokens[tmp]\r\n except ValueError:\r\n pass\r\n \r\n line_iob = ['O'] * len(line_tokens)# the iob tags of the whole text\r\n \r\n for i in range(0, len(ents)):\r\n #tokenize the entities\r\n entity_tokens = word_tokenize(ents[i])\r\n tmp = 'I-'+etypes[i]\r\n entity_iob = [tmp] * len(entity_tokens)\r\n entity_iob[0] = \"B-\" + etypes[i]\r\n \r\n #make changes to the iob tags to match the entities\r\n for j in range(0, len(entity_iob)):\r\n line_iob[starts[i] + j] = entity_iob[j]\r\n \r\n #the format is: token IOB-etypes\r\n for i in range(0, len(line_tokens)):\r\n output.write(\"{}\\t{}\\n\".format(line_tokens[i], line_iob[i]))\r\n output.write('\\n')#new document\r", "def creaLE(venta): #Esta sección fue hecha por Ángel\n listaPGA = [] # Esto genera la lista necesaria para pasarlo al archivo\n for elemento in venta:\n listaN = elemento[0] + \",\"\n listaN += str(elemento[1]) + \"\\n\"\n listaPGA.append(listaN)\n return listaPGA", "def writeVOC(b, ld, f):\n of = os.path.join(ld, f[: f.rfind('.')] + \".txt\")\n with open(of, \"w\") as fh:\n dname = os.path.dirname(os.path.dirname(of))\n fh.write(\"\"\"<annotation>\n <folder>%s</folder>\n <filename>%s</filename>\n <source>\n <database>The NVIDIA AI City 2017 dataset</database>\n <annotation>PASCAL 
VOC2007</annotation>\n <image>keyframes</image>\n </source>\n <size>\n <width>%d</width>\n <height>%d</height>\n <depth>3</depth>\n </size>\n <segmented>0</segmented>\n\"\"\" % (dname, f, nwidth, nheight))\n for r in b:\n fh.write(\"\"\" <object>\n <name>%s</name>\n <bndbox>\n <xmin>%d</xmin>\n <ymin>%d</ymin>\n <xmax>%d</xmax>\n <ymax>%d</ymax>\n </bndbox>\n </object>\n\"\"\" % ( r[0], int(r[1]*dw), int(r[2]*dh), int(r[3]*dw), int(r[4]*dh) ))\n fh.write(\"</annotation>\")", "def get_pos_tags(blob):\n return blob.pos_tags", "def countTagsInFile(fname):\n with open(fname, 'r', encoding='utf-8') as f:\n for line in f:\n words = line.split(' ')\n for w in words:\n tag = w.split('_')[1].rstrip()\n cat = tag[0].upper()\n if tag not in dictionaries[cat]:\n dictionaries[cat][tag] = 1\n else:\n dictionaries[cat][tag] += 1", "def defaults(file):\n\n\tUNCAT_TAGID = 47\n\tNOSERIES_TAGID = 375\n\n\treturn [NOSERIES_TAGID, UNCAT_TAGID]", "def tous_les_codages(nom_de_fichier1,nom_de_fichier2):\n with open(nom_de_fichier1,'r') as f :\n texte = f.read()\n for clef in range(26):\n texte_code = codage_texte(texte,clef)\n with open(nom_de_fichier2+'_{}'.format(transforme(clef))+'.txt','w') as f :\n f.write(texte_code)\n return None", "def read_tagged_word_list(filename):\n # TODO: write and test this method\n print 'reading tagged file'", "def overwrite(fstack: List[Tuple[str,int]]) -> ():\n filename, line_num = fstack.pop()\n tmp = str() # store our new file in memory\n with open(filename, 'r') as input:\n for i,line in enumerate(input):\n if i + 1 == line_num:\n line = line.replace(\"pub \",\"\",1)\n _, line_num = fstack.pop() if fstack else ('',0)\n tmp += line\n with open(filename, 'w') as newfile:\n newfile.write(tmp)", "def create_xmlfile(images_path, txt_file,class_name):\n\n#\tsavepath = os.path.join(images_path, \"{}_annotations\".format(class_name))\n#\tprint \"savepath:{}\".format(savepath)\n#\tif not os.path.exists(savepath):\n#\t\tos.mkdir(savepath)\n\n\ttxt = open(txt_file, 'r')\n\tfor line in txt:\n#\t\tpdb.set_trace()\n\t\tprint ('line:{}'.format(line))\n\t\twords = line.split(\" \")\n\t\tword_len = len(words)\n\t\tprint('length of words:{}'.format(word_len))\n\t\tprint (\"word_len:{}\".format(word_len))\n\t\t\n\t\tif word_len >3:\n\t\t\ta,b = words[0].split('.')\n\t\t\t\n\t\t\timg_path =a+'.jpg' #words[0]\n\t\t\timg_name =img_path # os.path.basename(img_path)\n\t\t\tprint ('image Name:%s'%img_name)\n\t\t\timg = Image.open('/home/graymatics/py-faster-rcnn/data/violence/'+img_name)\n\t\t\tprint(img)\n\t\t\tw,h = img.size\n\t\t\t#create xml\n\t\t\tannotation = et.Element('annotation')\n\t\t\tet.SubElement(annotation,'folder').text = 'demo'\n\t\t\tet.SubElement(annotation,'filename').text = img_name\n\n\t\t\tsource = et.SubElement(annotation, 'source')\n\t\t\tet.SubElement(source, 'database').text = 'internet'\n\t\t\tet.SubElement(source, 'annotation').text = 'Lyushuen'\n\t\t\tet.SubElement(source, 'image').text = 'unknown'\n\n\t\t\tsize = et.SubElement(annotation, 'size')\n\t\t\tet.SubElement(size, 'width').text = str(w)\n\t\t\tet.SubElement(size, 'height').text =str(h)\n\t\t\tet.SubElement(size, 'depth').text = '3'\n\n\t\t\tet.SubElement(annotation, 'segmented').text = str(0)\n\t for i in range(word_len/4 + 1):\n print (\"I size:{}\".format(i))\n if i == 0:\n print \"Image name is :{}\".format(words[0])\n elif i >= 1:\n index = i - 1\n\n\t\t\t\t\tobj = et.SubElement(annotation, 'object')\n\t\t\t\t\tet.SubElement(obj, 'name').text = class_name #words[5]#class_name\n\t\t\t\t\tet.SubElement(obj, 
'pose').text = 'Unspecified'\n\t\t\t\t\tet.SubElement(obj, 'truncated').text = '0'\n\t\t \t\t \tet.SubElement(obj, 'difficult').text = '0'\n\n\t\t \t\t\tbox = et.SubElement(obj, 'bndbox')\n\t\t\t \t\tet.SubElement(box, 'xmin').text = str(int(round(float(words[index*4+1]))))\n\t\t\t \t\tet.SubElement(box, 'ymin').text = str(int(round(float(words[index*4+2]))))\n\t\t\t \t\tet.SubElement(box, 'xmax').text = str(int(round(float(words[index*4+3]))))\n\t\t\t \t\tet.SubElement(box, 'ymax').text = str(int(round(float(words[index*4+4]))))\n\n\t\t #write to file\n\t\t \tname, exten = os.path.splitext(img_name)\n\t\t \tanno_path = os.path.join(src_img,name+'.xml') #path of annotation files\n\t\t\tprint \"anno_path:{}\".format(anno_path)\n\t\t \ttree = et.ElementTree(annotation)\n\t\t \ttree.write(anno_path)\n\ttxt.close()", "def track_info(filename):\n tag = id3.Tag()\n tag.parse(filename)\n a = load(filename)\n print(\"# {}\".format('=' * 78))\n print(\"Track Name: {}\".format(tag.title))\n print(\"Track Artist: {}\".format(tag.artist))\n print(\"Track Album: {}\".format(tag.album))\n print(\"Track Duration: {}\".format(duration_from_seconds(a.info.time_secs)))\n print(\"Track Number: {}\".format(tag.track_num))\n print(\"Track BitRate: {}\".format(a.info.bit_rate))\n print(\"Track BitRate: {}\".format(a.info.bit_rate_str))\n print(\"Sample Rate: {}\".format(a.info.sample_freq))\n print(\"Mode: {}\".format(a.info.mode))\n print(\"# {}\".format('=' * 78))\n print(\"Album Artist: {}\".format(tag.album_artist))\n print(\"Album Year: {}\".format(tag.getBestDate()))\n print(\"Album Recording Date: {}\".format(tag.recording_date))\n print(\"Album Type: {}\".format(tag.album_type))\n print(\"Disc Num: {}\".format(tag.disc_num))\n print(\"Artist Origin: {}\".format(tag.artist_origin))\n print(\"# {}\".format('=' * 78))\n print(\"Artist URL: {}\".format(tag.artist_url))\n print(\"Audio File URL: {}\".format(tag.audio_file_url))\n print(\"Audio Source URL: {}\".format(tag.audio_source_url))\n print(\"Commercial URL: {}\".format(tag.commercial_url))\n print(\"Copyright URL: {}\".format(tag.copyright_url))\n print(\"Internet Radio URL: {}\".format(tag.internet_radio_url))\n print(\"Publisher URL: {}\".format(tag.publisher_url))\n print(\"Payment URL: {}\".format(tag.payment_url))\n print(\"# {}\".format('=' * 78))\n print(\"Publisher: {}\".format(tag.publisher))\n print(\"Original Release Date: {}\".format(tag.original_release_date))\n print(\"Play Count: {}\".format(tag.play_count))\n print(\"Tagging Date: {}\".format(tag.tagging_date))\n print(\"Release Date: {}\".format(tag.release_date))\n print(\"Terms Of Use: {}\".format(tag.terms_of_use))\n print(\"isV1: {}\".format(tag.isV1()))\n print(\"isV2: {}\".format(tag.isV2()))\n print(\"BPM: {}\".format(tag.bpm))\n print(\"Cd Id: {}\".format(tag.cd_id))\n print(\"Composer: {}\".format(tag.composer))\n print(\"Encoding date: {}\".format(tag.encoding_date))\n print(\"# {}\".format('=' * 78))\n print(\"Genre: {}\".format(tag.genre.name))\n print(\"Non Std Genre Name: {}\".format(tag.non_std_genre.name))\n print(\"Genre ID: {}\".format(tag.genre.id))\n print(\"Non Std Genre ID: {}\".format(tag.non_std_genre.id))\n print(\"LAME Tag: {}\".format(a.info.lame_tag))\n print(\"# {}\".format('=' * 78))\n print(\"Header Version: {}\".format(tag.header.version))\n print(\"Header Major Version: {}\".format(tag.header.major_version))\n print(\"Header Minor Version: {}\".format(tag.header.minor_version))\n print(\"Header Rev Version: {}\".format(tag.header.rev_version))\n 
print(\"Header Extended: {}\".format(tag.header.extended))\n print(\"Header Footer: {}\".format(tag.header.footer))\n print(\"Header Experimental: {}\".format(tag.header.experimental))\n print(\"Header SIZE: {}\".format(tag.header.SIZE))\n print(\"Header Tag Size: {}\".format(tag.header.tag_size))\n print(\"Extended Header Size: {}\".format(tag.extended_header.size))\n print(\"# {}\".format('=' * 78))\n print(\"File Name: {}\".format(tag.file_info.name))\n print(\"File Tag Size: {}\".format(tag.file_info.tag_size))\n print(\"File Tag Padding Size: {}\".format(tag.file_info.tag_padding_size))\n print(\"File Read Only: {}\".format(tag.read_only))\n print(\"File Size: {}\".format(a.info.size_bytes))\n print(\"Last Modified: {}\".format(time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(tag.file_info.mtime))))\n print(\"Last Accessed: {}\".format(time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(tag.file_info.atime))))\n print(\"# {}\".format('=' * 78))", "def parse_rec(filename):\n\ttree = ET.parse(filename)\n\t# label_ = tree.findall('object')[-1].find('name').text\n\tlabel = tree.find('folder').text\n\n\treturn label", "def update_filename_to_nifti(filename):\n extension = get_file_extension(filename)\n if not \"nii\" in extension:\n filename = filename.replace(extension, \".nii.gz\")\n return filename", "def parseRaw(tagDict, inFileName):\r\n\r\n # '%Y/%m/%d %H:%M:%S' RAW Argos.csv format\r\n\r\n\r\n csvName = path.basename(inFileName)\r\n # Trap argos raw files that occurred within these dates\r\n # date formatted dd/mm/yy instead of yyyy/mm/dd\r\n bd = False\r\n if csvName >= util.CSV_schema.bad_dates[0][0]:\r\n if csvName <= util.CSV_schema.bad_dates[0][1]:\r\n bd = True\r\n if csvName >= util.CSV_schema.bad_dates[1][0]:\r\n if csvName <= util.CSV_schema.bad_dates[1][1]:\r\n bd = True\r\n\r\n newPasses = []\r\n d_ptt = {v[0]:k for k,v in tagDict.items()}\r\n pttDict = OrderedDict(sorted(d_ptt.items())) # Sort into {ptt: tag_id, ....}\r\n del d_ptt\r\n with open(inFileName, 'rb') as inFile:\r\n count = sum(1 for line in inFile)\r\n inFile.seek(0) # reset file\r\n reader = csv.DictReader(inFile)\r\n while reader.line_num < count:\r\n # Trap for changed fieldname\r\n gt = True if util.CSV_schema.gt_names[1] in reader.fieldnames else False\r\n featID = None\r\n ptt = 0\r\n msgType = 'NEW'\r\n str_timeval = ''\r\n passDur = None\r\n for row in reader:\r\n if row['Platform ID No.'][0] =='#': # What is this even trapping ???\r\n continue\r\n if int(row['Platform ID No.']) not in pttDict.keys(): # Orphan Tag\r\n newOrphan(row, inFileName, gt)\r\n msgType = 'NEW'\r\n updatePttList(ptt, row['Msg Date'],bd)\r\n continue\r\n elif int(row['Platform ID No.']) != ptt: # Start New PTT\r\n if ptt: # Skip ptt = 0\r\n tag_id = pttDict[ptt]\r\n dbutil.updateDeployment(conn, tag_id) # Update ptt that just finished\r\n updatePttList(ptt, last_msg, bd)\r\n updateDevice(tag_id, last_msg, bd)\r\n# HOW to update final (Valid) ptt?????\r\n msgType = 'NEW'\r\n # tag specific vars\r\n ptt = int(row['Platform ID No.']) #=integer\r\n tag_id = pttDict[ptt]\r\n pttStart = tagDict.get(tag_id)[1] #=datetimes\r\n pttStop = tagDict.get(tag_id)[2]\r\n animal_id = tagDict.get(tag_id)[4] #=integer\r\n # loop vars\r\n str_timeval = row['Loc. date'] if row['Loc. 
date'] else row['Msg Date']\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n # Trap out of range date\r\n if timevalue < pttStart:\r\n ptt = 0 # Force new ptt Variables for next row\r\n continue\r\n# ********* NOT TRAPPING stoptime ??\r\n elif timevalue > pttStop:\r\n ptt = 0\r\n continue\r\n # start parsing\r\n last_msg = format_date(row['Msg Date'],bd)\r\n if msgType == 'SAME':\r\n if row['Loc. date']:\r\n if row['Loc. date'] == str_timeval:\r\n if row['Pass'] != passDur:\r\n msgType = 'NEW'\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n if row['Sat.'] != sat:\r\n msgType = 'NEW'\r\n sat = row['Sat.']\r\n elif row['Loc. date'] != str_timeval: # Definitely New pass\r\n msgType = 'NEW'\r\n str_timeval = row['Loc. date']\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n else: # row['Loc. date'] empty\r\n if row['Pass'] == '0': # Single pass\r\n msgType = 'NEW'\r\n str_timeval = row['Msg Date']\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = None # OR '0'\r\n sat = row['Sat.']\r\n elif row['Pass'] != '0': # Multi-Z pass\r\n if row['Pass'] != passDur: # still in same pass\r\n msgType = 'NEW'\r\n str_timeval = getPassTime(inFileName,row['Pass'],\r\n str(ptt),\r\n row['Msg Date'][:10])\r\n timevalue = format_date(str_timeval,bd)\r\n passDur = row['Pass']\r\n sat = row['Sat.']\r\n if msgType == 'SAME': #Append: to Transmit\r\n if featID:\r\n transmitID, last_msg = addTransmit(featID, row, bd)\r\n\r\n if msgType == 'NEW': # Append: to Argos & Transmit\r\n featID = addArgos(row, tag_id, animal_id, timevalue, gt, bd)\r\n msgType = 'SAME'\r\n if featID:\r\n print 'Pass at: [{0}] added for {1}'.format(str_timeval, ptt)\r\n newPasses.append(featID)\r\n\r\n return newPasses", "def add_tumor_volume_to_nifty(filename, volume):\n img = nibabel.load(filename)\n img.header[\"descrip\"] = volume\n nibabel.save(img,filename)", "def read_file(path: str, tags: list):\n if not os.path.exists(path):\n raise FileNotFoundError(\"file {} does not exists\".format(path))\n collection = {}\n doc_id = 0\n continue_read = True\n change_marqueur = False\n with open(path, 'r') as file:\n for l in file.readlines():\n\n if l[0] == \".\": # on est face à un marqueur\n marqueur = l[:2]\n if marqueur == \".I\":\n doc_id += 1\n collection[doc_id] = \"\"\n continue\n elif marqueur in tags:\n continue_read = True\n change_marqueur = True\n continue\n else:\n continue_read = False\n continue\n\n if continue_read:\n collection[doc_id] += l.strip()\n if change_marqueur:\n collection[doc_id] += \". 
\"\n change_marqueur = False\n else:\n collection[doc_id] += \" \"\n\n return collection", "def readDirectory():\n tagdir = \"tagreplacements\"\n data = os.listdir(tagdir)\n for d in data:\n processFile(os.path.join(tagdir,d))\n \n #print(repd)", "def taxonomy_files(self):\n location=self.place.capitalize()+'-'+str(self.year)+'-'\n no_of_ideograms=self.OTU.make_tree(location,self.start_level,self.plot_level)\n return no_of_ideograms", "def postprocess_umi_tag(sam_file, out_bamfile):\n samfile = pysam.AlignmentFile(sam_file, 'r')\n try:\n with pysam.AlignmentFile(out_bamfile, \"wb\", header=samfile.header) as outf:\n # append a tag\n # subset the name field\n # write the line to bam\n for alignment in samfile:\n alignment.set_tag('XM', alignment.qname[:6])\n alignment.qname = alignment.qname[7:] # strip the umi flag from the name\n outf.write(alignment)\n finally:\n samfile.close()", "def __str__(self):\n return \"inifile: \" + self.inifile", "def descwrite(i):\n \n podname = i.title.string\n f = codecs.open(podftxt, encoding='utf-8', mode='w')\n \n f.write(podname)\n f.write(\"\\n\\n\")\n # enclosing in try-exception because of this error\n # TypeError: coercing to Unicode: need string or buffer, Tag found\n try:\n # This is to decode &lt/&gt before writing it to the file\n # BeautifulStoneSoup(items[1].description.string, convertEntities=BeautifulStoneSoup.HTML_ENTITIES).contents[0]\n f.write(BeautifulStoneSoup(i.description.string,\n convertEntities=\n BeautifulStoneSoup.HTML_ENTITIES).contents[0])\n except TypeError: \n f.write(i.description.string)\n \n f.close", "def print_tags(self, filename):\n fh = open(filename, 'w')\n for t in self.source_tags.tags:\n fh.write(\"%d\\t%d\\t%s\" % (t.begin, t.end, t.name))\n for (attr, val) in t.attrs.items():\n fh.write(\"\\t%s=\\\"%s\\\"\" % (attr, val.replace('\"','&quot;')))\n fh.write(\"\\n\")", "def change2postion(chrname,cluster, tag_thr=2):\n package = {}\n package['POSITION_FILE'] = []\n package['NEW_FILE'] = []\n \n cutoff = 1000 / 10\n oceanbegin = 0 # ocean: tag num <= 2\n oceanflag = 1\n \n num = []\n for k in xrange(len(cluster)):\n num.append(cluster[k])\n \n for k in xrange(len(num) - 1):\n if num[k] > tag_thr:\n if oceanflag == 1:\n oceanflag = 0\n if (k - oceanbegin) >= cutoff:\n oceanflag = 0\n for m in xrange(oceanbegin, k):\n num[m] = -1\n \n elif num[k] <= tag_thr and oceanflag == 0:\n oceanbegin = k\n oceanflag = 1\n if oceanflag == 1:\n for m in xrange(oceanbegin, len(num)):\n num[m] = -1\n\n linenum = 0\n islandflag = 0\n islandbegin = 0\n islandline = 0\n for k in xrange(len(num) - 1):\n if islandflag == 0 and num[k] > -1:\n islandflag = 1\n linenum += 1\n islandbegin = k + 1\n islandline = linenum\n package['NEW_FILE'].append(num[k])\n elif islandflag == 1 and num[k] > -1:\n package['NEW_FILE'].append(num[k])\n linenum += 1\n elif islandflag == 1 and num[k] == -1:\n package['POSITION_FILE'].append('\\t'.join([chrname, str(islandbegin * 10 - 9) , str(k * 10 - 9), str(islandline), str(linenum)]))\n islandflag = 0\n\n if islandflag == 1:\n package['NEW_FILE'].append(num[len(num) - 1])\n linenum += 1\n package['POSITION_FILE'].append('\\t'.join([chrname, str(islandbegin * 10 - 9), str(len(num) * 10 - 9), str(islandline), str(linenum)]))\n \n num = []\n return package", "def tag():\n iso_list = []\n tags = [\"spatial_entity\", \"place\", \"motion\", \"location\", \"signal\", \"qslink\", \"olink\"]\n for token in doc:\n if token.norm_ in tags:\n iso_list.append(token.norm_)\n setList = list(set(iso_list))\n my_dict = {i: 
iso_list.count(i) for i in setList}\n\n for i in tags:\n if i.lower() not in my_dict:\n my_dict[i] = 0\n print(my_dict)", "def split_file(filename):\n \n \n#tree = ElementTree.ElementTree()\n#root = ElementTree.Element(\"root\")\n#a = ElementTree.Element(\"a\")\n#a.text = \"1\"\n#root.append(a)\n#tree._setroot(root)\n#tree.write(\"sample.xml\" \n\n \n find_counter = 0\n check_counter = 0 \n tree_file = files()\n #outfile = next(tree_file)\n \n \n with open(filename,mode =\"r\") as file :\n \n for line in file :\n \n if line.startswith(\"<?xml\"):\n outfile = next(tree_file)\n outfile.write(line)", "def existing_village_file(kovetz):\n try:\n cat77 = nbt.NBTFile(kovetz)\n except IOError:\n raise Exception(\"Hmm. Unfortunately, the file requested does not exist :(\")\n tick4 = cat77['data']['Tick'].value\n return cat77, tick4", "def write_to(self, io):\n out = io\n io, path = tempfile.mkstemp()\n fnlen = Erf.filename_length(self.fversion)\n lstr_iter = iter(sorted(self.localized_strings.items()))\n locstr = []\n for k, v in lstr_iter:\n locstr.append(struct.pack(\"<L L %ds x\" % len(v), k, len(v)+1, v.encode(get_encoding())))\n locstr = b''.join(locstr)\n\n keylist = []\n for i, co in enumerate(self.content):\n pad = 0\n max = len(co.resref)\n if len(co.resref) > fnlen:\n print(\"truncating filename %s, longer than %d\" % (co.resref, fnlen), file=sys.stderr)\n max = fnlen\n else:\n pad = fnlen - len(co.resref)\n\n keylist.append(struct.pack(\"<%ds %dx L h h\" % (len(co.resref), pad),\n co.resref.encode(get_encoding()),\n i, co.res_type, 0))\n keylist = b''.join(keylist)\n\n offset = 160 + len(locstr) + len(keylist) + 8 * len(self.content)\n\n reslist = []\n for co in self.content:\n reslist.append(struct.pack(\"< L L\", offset, co.size))\n offset += co.size\n\n reslist = b''.join(reslist)\n\n offset_to_locstr = 160\n offset_to_keylist = offset_to_locstr + len(locstr)\n offset_to_resourcelist = offset_to_keylist + len(keylist)\n\n header = struct.pack(\"8s LL LL LL LL L 116x\",\n (self.ftype+' '+self.fversion).encode(get_encoding()),\n len(self.localized_strings),\n len(locstr), len(self.content), offset_to_locstr, offset_to_keylist,\n offset_to_resourcelist, self.year, self.day_of_year, self.desc_strref)\n\n os.write(io, header)\n os.write(io, locstr)\n os.write(io, keylist)\n os.write(io, reslist)\n\n for co in self.content:\n os.write(io, co.get())\n\n os.close(io)\n shutil.copy(path, out)\n os.remove(path)", "def fileUnigene():\n \n with open(gene2unigene, 'r') as unigene,\\\n open(UniGene, 'w') as gene:\n \n header_line = next(unigene)\n header_line= header_line.split(\"\\t\")\n \n \n###################################################################################\n# #\n# #\n# Index Value of columns we need # \n# # \n# #\n################################################################################### \n GeneID_index = header_line.index('#GeneID')\n Unigene_index = header_line.index('UniGene_cluster\\n')\n \n for line in tqdm.tqdm(unigene, 'Time for loop of unigeneConversion'):\n lineList= line.split(\"\\t\")\n if (re.match(r\"^[a-zA-Z]{2,3}[.]([0-9]*)$\", lineList[1])):\n gene.write(lineList[GeneID_index] + \"\\tUniGene\\t\" + str(lineList[Unigene_index]))", "def readByte(self) :\n tag = ord(self.minfile[self.pos])\n self.pos += 1\n return tag", "def export_gexf(rotulos,similaridades,nome_arquivo,threshold,excluir_negativos):\n\n tbl = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))\n\n arquivo = codecs.open(nome_arquivo + 
\".gexf\",\"w\",\"utf-8\")\n arquivo.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n arquivo.write('<gexf xmlns=\"http://www.gexf.net/1.2draft\" version=\"1.2\">\\n')\n arquivo.write('\\t<graph mode=\"static\" defaultedgetype=\"undirected\">\\n')\n arquivo.write('\\t\\t\\t<nodes>\\n')\n arquivo.flush()\n\n cont=0\n docs = list(rotulos.keys())\n for key in docs:\n rotulo = re.sub(r'[<>]', '', rotulos[key].translate(tbl))\n arquivo.write(u\"\\t\\t\\t\\t<node id=\\\"%d\\\" label=\\\"%s\\\"/>\\n\" % (docs.index(key), rotulo))\n\n cont = cont+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</nodes>\\n')\n arquivo.write('\\t\\t\\t<edges>\\n')\n arquivo.flush()\n\n cont=0\n for similaridade in similaridades:\n if(excluir_negativos and (similaridade[2] < 0)):\n continue\n\n if abs(similaridade[2]) >= threshold:\n arquivo.write(\"\\t\\t\\t\\t<edge source=\\\"%d\\\" target=\\\"%d\\\" weight=\\\"%f\\\" />\\n\" % (docs.index(similaridade[0]),docs.index(similaridade[1]),similaridade[2]))\n\n cont = cont+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</edges>\\n')\n arquivo.write('\\t</graph>\\n')\n arquivo.write('</gexf>')\n arquivo.close() # you can omit in most cases as the destructor will call it", "def read_file(self, file_path): \n logging.info('Lendo arquivo de {0}'.format(file_path))\n file_with_tags = open(file_path, \"r\", encoding='utf-8')\n return file_with_tags.readlines()", "def expand(self, sourcefile):\n with open(sourcefile, 'rb') as src_file: # Öffne die zu expandierende Datei\n if src_file.read(3) == b'rl3': # Wenn sie eine RL3 Datei ist\n extension_counter = src_file.read(1) # Lese die Anzahl der Bytes der Endung aus\n extension_orig = src_file.read(\n int.from_bytes(extension_counter, 'big')) # Lese die Endung auf Basis der Anzahl aus\n outputfile = os.path.splitext(sourcefile)[0] # Splitte den Dateinamen vom Pfad\n if os.path.isfile(\n outputfile + \".\" + extension_orig.decode(\"utf-8\")): # Überprüfe ob die Datei existiert\n number = 1 # Setz Dateinummer auf eins\n while os.path.isfile(outputfile + str(number) + \".\" + extension_orig.decode(\n \"utf-8\")): # Wiederhohle solange bis die Datei nicht existiert\n number += 1 # Erhöhe die Dateinummer\n outputfile += str(number) # Füge dem Dateiname die Nummer hinzu\n outputfile += \".\" + extension_orig.decode(\"utf-8\") # Füge dem Dateinamen die Endung hinzu\n with open(outputfile, 'wb') as dest_file: # Öffne die Zieldatei\n chunk = src_file.read(self.chunk_size) # Lese die Bytes aus\n counter = False # Aktuelles Byte ist keine Zähler\n value = False # Aktuelles Byte ist nicht der Wert\n count = 0 # Null Wiederhohlungen vom Wert\n while chunk: # Solange Bytes da sind\n for byte in chunk: # Gehe durch jedes Byte\n if byte == ord(\n self.MARKER) and not counter and not value: # Wenn das Byte ein Markierungszeichen ist und Zähler und Wert nicht aktiv sind\n counter = True # Aktiviere den Zähler\n elif counter: # Wenn der Zähler aktiv ist\n if byte == 0: # Wenn das aktuelle Byte null ist\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe den Marker\n counter = False # Desktiviere den Zähler\n else: # Sonst\n count = byte # Setze die Anzahl auf den Wert des Bytes\n counter = False # Deaktiviere den Zähler\n value = True # Aktiviere den Wert\n elif value: # Wenn der Wert aktiv ist\n for i in range(count + (self.MAXBYTES - 255)): # Für die Aazahl im Zähler\n dest_file.write(byte.to_bytes(1, 'big')) # Schreibe die Bytes\n value = False # Deaktiviere den Wert\n 
else: # Sonst\n dest_file.write(byte.to_bytes(1, 'big')) # Schreibe das Byte\n chunk = src_file.read(self.chunk_size) # Lese neue Bytes ein\n if counter: # Wenn der Zähler aktiv ist\n dest_file.write(ord(self.MARKER).to_bytes(1, 'big')) # Schreibe den Marker\n else: # Sonst\n raise RLedError # Werfe den RLedError", "def main():\n token_dict_dict = {}\n all_dict = {}\n pronoun_proportion_list = []\n tag = 'PRP' # base tag for all pronouns, see 'https://www.clips.uantwerpen.be/pages/MBSP-tags' for more info\n\n for text in glob.glob(file_loc):\n file_title = os.path.basename(text).split('.')[0]\n\n with open(text, 'r') as f:\n speech = f.read()\n text_dict = {}\n\n try:\n #TextBlob goodness that tags all the words for me\n speech_blob = TextBlob(clean(speech))\n speech_blob.tags\n except:\n #for some reason Trump's address contained a unicode 128 character that I couldn't find\n #instead of getting rid of it in a single file, i decided to have an except that could catch that case in\n #all sitations and handle them accordingly\n\n #lets the user know that there was an issue, and that it's been handled\n print file_title,\n print \"contains unexpected unicode characters. they have been removed and the document has been processed\"\n\n #gets rid of all unicode characters. i could do this by default, but all the other files ran fine\n #so i didn't think it was worth it\n speech_blob = TextBlob(clean(speech.decode('unicode_escape').encode('ascii','ignore')))\n\n for token in speech_blob.tags:\n # builds the inital dictionary of data, only looks at words with a specified tag\n if tag in token[1]:\n try:\n text_dict[token[0]] += 1\n except:\n text_dict[token[0]] = 1\n try:\n all_dict[token[0]] += 1\n except:\n all_dict[token[0]] = 1\n #breaks the title into 3 pieces: number, president, date\n token_dict_dict[file_title] = text_dict\n partial_split, date = string.rsplit(file_title, '_', 1)\n num_pres, pres = string.split(partial_split, '_', 1)\n\n pronoun_proportion_list.append(\n (pres, date, total_to_proportion(pronoun_breakdown(token_dict_dict[file_title])))\n )\n create_pronoun_graph(sort_list_by_president_order(pronoun_proportion_list))", "def test_observable_file_add(self):\n with tempfile.NamedTemporaryFile('wb', delete=False) as f:\n f.write(b\"content\")\n filename = f.name\n fileinfo = self.api.observable_file_add(filename, ['file_tag'])\n os.remove(filename)\n # SHA256 of \"content\"\n expected_filename = \"FILE:ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73\"\n self.assertEqual(fileinfo[0]['value'], expected_filename)\n tags = [t['name'] for t in fileinfo[0]['tags']]\n self.assertEqual(tags, ['file_tag'])", "def load_Bietenholz(path=data_path+\"Table1_complete_ascii.txt\"):\n\n res = {}\n num_of_SN = 0\n ex = \"\"\n\n with open(path, 'r') as f:\n for i in range(30):\n next(f)\n for line in f:\n words = line.split()\n current_SN_name = words[0]\n # determine if it's a new SN\n if current_SN_name != ex:\n if num_of_SN > 0:\n res[ex] = SN # save previous SN\n SN = SuperNova()\n num_of_SN += 1\n ex = words[0]\n\n SN.name = words[0]\n if ('L' in line[10]):\n SN.is_limit = np.append(SN.is_limit, True)\n else:\n SN.is_limit = np.append(SN.is_limit, False)\n SN.year = np.append(SN.year, int(line[12:16]))\n SN.month = np.append(SN.month, int(line[17:19]))\n SN.day = np.append(SN.day, float(line[20:25]))\n SN.telescope = np.append(SN.telescope, line[26:33])\n SN.freq = np.append(SN.freq, float(line[35:40]))\n SN.flux = np.append(SN.flux, float(line[41:49]))\n SN.dflux = 
np.append(SN.dflux, float(line[50:56]))\n SN.comment = np.append(SN.comment, line[57:63])\n res[words[0]] = SN\n return res", "def template_village_file(tick):\n cat = nbt.NBTFile()\n cat2 = cat['data'] = nbt.TAG_Compound()\n cat2[\"Villages\"] = nbt.TAG_List(Banana)\n cat2['Tick'] = nbt.TAG_Int(tick)\n return cat", "def extract_data(file_ner,file_pos,separator=\" \"):\n\n # read NER and POS from the two files\n words_tags=read_conll_file(file_ner)\n words_pos=read_conll_file(file_pos)\n \n ## some checks, e.g., that both files have same length, same tokens\n assert(len(words_tags)==len(words_pos))\n \n for (words,tags),(_,pos) in zip(words_tags,words_pos):\n for word,pos,tag in zip(words,pos,tags):\n # first letter is capitalized\n cap=\"+\" if word[0].isupper() else \"-\"\n hyphen = '+' if '-' in word else '-'\n l = str(len(word))\n #vowels = \"\".join(sorted([w for w in word.lower() if w in ['a','e','i','o','u','y']]))\n #################################\n ###### YOUR FEATURES HERE ####### \n #################################\n # 0=separator\n \n ## todo: output the cap feature and more \n ## make sure the format you output here is what the nerfeats.py script expects as fields!\n print separator.join([word.lower(),pos,cap, l, hyphen, tag])\n # sentence separator\n print \"\"", "def files_of(mage_id):\n\tcursor.execute('select tag.id, tag.name from tag, tagging as is_file_tagging, tagging where tag.id = tagging.child_id and tag.id = is_file_tagging.child_id and is_file_tagging.parent_id = ? and tagging.parent_id = ?', (settings['file'],mage_id))\n\treturn cursor.fetchall()", "def process_tags(filename):\n keys = {\"lower\": 0, \"lower_colon\": 0, \"problemchars\": 0, \"other\": 0}\n for _, element in ET.iterparse(filename):\n keys = key_type(element, keys)\n\n return keys", "def carrega_endereco_tag_gne(self, tag):\r\n gne_dict = {\"EMPPK\":\"CONFIGURACAO\\EmpPK\",\r\n \"EMPCK\":\"CONFIGURACAO\\EmpCK\",\r\n \"MODELO\":\"CONFIGURACAO\\Modelo\",\r\n \"TIPONF\":\"CONFIGURACAO\\TipoNF\",\r\n \"TIPOAMBIENTE\":\"CONFIGURACAO\\TipoAmbiente\",\r\n \"TOKENSEFAZ\":\"CONFIGURACAO\\TokenSefaz\",\r\n \"IMPRESSAOCOMPLETA\":\"CONFIGURACAO\\ImpressaoCompleta\",\r\n \"CUF\":\"IDE\\cUF\",\r\n \"NNF\":\"IDE\\\\nNF\",\r\n \"SERIE\":\"IDE\\Serie\",\r\n \"CMUNFG\":\"IDE\\cMunFG\",\r\n \"INDPRES\":\"IDE\\indPres\",\r\n \"CNPJ\":\"EMIT\\CNPJ\",\r\n \"XNOME\":\"EMIT\\\\xNome\",\r\n \"XLGR\":\"EMIT\\ENDEREMIT\\\\xLgr\",\r\n \"NRO\":\"EMIT\\ENDEREMIT\\Nro\",\r\n \"XBAIRRO\":\"EMIT\\ENDEREMIT\\\\xBairro\",\r\n \"CMUN\":\"EMIT\\ENDEREMIT\\cMun\",\r\n \"XMUN\":\"EMIT\\ENDEREMIT\\\\xMun\",\r\n \"UF\":\"EMIT\\ENDEREMIT\\UF\",\r\n \"CEP\":\"EMIT\\ENDEREMIT\\CEP\",\r\n \"IE\":\"EMIT\\IE\",\r\n \"IMPRIMIR\":\"NFCE\\MSGPROMOCIONAL\\Imprimir\",\r\n \"TITULO\":\"NFCE\\MSGPROMOCIONAL\\Titulo\",\r\n \"MSGIMPOSTO\":\"NFCE\\MsgLeiDoImposto\"\r\n }\r\n\r\n if tag not in gne_dict:\r\n raise Exception(\"-40: Tag XML GNE_Framework nao encontrada.\")\r\n return gne_dict[tag]", "def change2postion_saveas_wig(wigfile,chrname, cluster, tag_thr=2):\n package = {}\n \n cutoff = 1000 / 10\n oceanbegin = 0 # ocean: tag num <= 2\n oceanflag = 1\n \n num = []\n for k in xrange(len(cluster)):\n num.append(cluster[k])\n \n #put a header for each chromosome\n print >>wigfile,\"track type=wiggle_0\\nvariableStep chrom=%s span=%d\" %(chrname,10)\n \n for k in xrange(len(num) - 1):\n if num[k] > tag_thr:\n if oceanflag == 1:\n oceanflag = 0\n if (k - oceanbegin) >= cutoff:\n oceanflag = 0\n for m in xrange(oceanbegin, k):\n num[m] = -1\n \n elif 
num[k] <= tag_thr and oceanflag == 0:\n oceanbegin = k\n oceanflag = 1\n if oceanflag == 1:\n for m in xrange(oceanbegin, len(num)):\n num[m] = -1\n\n linenum = 0\n islandflag = 0\n islandbegin = 0\n islandline = 0\n for k in xrange(len(num) - 1):\n if islandflag == 0 and num[k] > -1:\n islandflag = 1\n linenum += 1\n islandbegin = k + 1\n islandline = linenum\n print >>wigfile, \"%d\\t%d\" %(islandbegin*10-9,num[k])\n \n elif islandflag == 1 and num[k] > -1:\n linenum += 1\n print >>wigfile, \"%d\\t%d\" %(k*10+1,num[k])\n elif islandflag == 1 and num[k] == -1:\n islandflag = 0\n\n if islandflag == 1:\n linenum += 1\n print >>wigfile, \"%d\\t%d\" %(len(num)*10-9,num[len(num)-1])\n \n num = []", "def create_tags(tag_dict, o_tree):\n for i, o in tag_dict.items():\n subtag1 = o_tree.find(o[0])\n subtag2 = etree.Element(i)\n subtag1.addnext(subtag2)\n o_tree.write(f'{output_path}/ppt/presentation.xml', pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n \n return", "def concatenate_detected_verified(fasta_name, PATH_FASTA_DETECTED, PATH_FASTA_VERIFIED, INFO_folder, PATH_FASTA_CONCATENATED):\n\n\tprint \"\\n#################\"\n\tprint \"# Concatetaned file\"\n\tprint \"#################\\n\"\n\n\t# NOTE Dictionaire avec en clef l'id espèce/système et en value une liste\n\t# NOTE [\"l'id espèce/système du verifié qui correspond\", [liste des sequences ATPase, IM ...]]\n\tdict_remove = {}\n\n\tprint \"\\n------------------------------------------\"\n\tprint \"| First read : Creation of the dictionnary\"\n\tprint \"------------------------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tlist_seq_verified = list(SeqIO.parse(verified_fasta, \"fasta\"))\n\t\tlist_id_verified = [seq.id for seq in list_seq_verified]\n\t\tlist_seq_verified = [seq.seq for seq in list_seq_verified]\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\t# IDEA Il faut tester au moins une fois pour voir si lors de la concatenation, je ne me retrouve pas avec des systems ou je n'ai pas tous enlevé. 
Exemple l'ATPase de X n'est pas la même que celle de Y mais l'IMplatform l'ai si c'est le cas X est a enlevé aussi pour son ATPase\n\t\t# IDEA Si idea précédente vrai alors il faut faire des fichiers temporaires des sequences que l'on garde et concatener par \"cat\" à la fin le fichier temporaire et son homonyme en verifié.\n\n\t\t# NOTE Il y avait un problème : le nom/id de l'epèce + système ne doit pas contenir le _NumX_ car ce Num fait référence au nombre de duplicat de la protéine (exemple deux ATPase gspE)\n\t\t# NOTE Quelques systèmes on des sequences qui sont similaire pour toutes les protéines sauf une exemple ESCO3 et NC_011993 qui sont identique pour tous sauf ATPase (98% seulement)\n\n\t\tfor seq in seq_parser :\n\n\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\tsys.stdout.flush()\n\t\t\tprogression += 1\n\n\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\tif id_seq in dict_remove :\n\t\t\t\tcontinue\n\n\t\t\telif seq.seq in list_seq_verified :\n\t\t\t\tindex=list_seq_verified.index(seq.seq)\n\n\t\t\t\tid_seq_verif = list_id_verified[index].split(\"_\")\n\t\t\t\tid_seq_verif = re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq_verif[:id_seq_verif.index(\"V\")]))\n\n\t\t\t\t# NOTE dans le dictionnaire je met le système vérifié en premier, toutes les séquences du système identitique en deuxième et la séquence qui en est la cause en troisème\n\t\t\t\tdict_remove[id_seq]=[id_seq_verif,[], seq.id]\n\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\tprint \"\\n-----------------------------\"\n\tprint \"| Second read : Writing files\"\n\tprint \"-----------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tos.system('cat \"{}\" > \"{}\"'.format(verified_fasta, concatenated_fasta))\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\twith open(concatenated_fasta, \"a\") as w_file :\n\t\t\tfor seq in seq_parser :\n\n\t\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprogression += 1\n\n\t\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\t\tif id_seq in dict_remove :\n\t\t\t\t\tdict_remove[id_seq][1].append(seq)\n\n\t\t\t\telse :\n\t\t\t\t\tSeqIO.write(seq, w_file, \"fasta\")\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\t# NOTE Dict remove complete and all concatenate write\n\twrite_remove_concatenate(dict_remove, INFO_folder)\n\n\treturn", "def load_annos(self, anno_path):\n\n if os.path.exists(anno_path) is False or os.path.isfile(anno_path) is False or anno_path.endswith('txt') is False:\n print(\"Wrong path: not exist or not a txt file: %s\" % anno_path)\n return None, None\n\n list_file_id, list_anno_id = [], []\n list_x, list_y, list_w, list_h = [], [], [], []\n list_blur, list_expr, list_illum, list_occ, list_pose, list_inval = [], [], [], [], [], []\n anno_id = 0\n\n list_id = 
[]\n list_filename = []\n file_id = 0\n\n num_annos_total = 0\n\n with open(anno_path) as afile:\n line = \"begin\"\n while line != \"\":\n line = afile.readline()\n\n if line.rstrip().endswith('jpg'): # it is a file\n file_name = line.strip()\n list_id.append(file_id)\n list_filename.append(file_name)\n\n num_annos = int(afile.readline().strip())\n\n for i in range(num_annos):\n px, py, pw, ph, blur, expr, illum, inval, occ, pose = afile.readline().strip().split(' ')\n px, py, pw, ph = int(px), int(py), int(pw), int(ph)\n\n if pw == 0 or ph == 0: # ignore invalid faces (0 width or height)\n continue\n\n if pw < 0:\n px = px+pw\n pw = abs(pw)\n if ph < 0:\n py = py+ph\n ph = abs(ph)\n\n list_file_id.append(file_id)\n list_anno_id.append(anno_id)\n list_x.append(px)\n list_y.append(py)\n list_w.append(pw)\n list_h.append(ph)\n list_blur.append(int(blur))\n list_expr.append(int(expr))\n list_illum.append(int(illum))\n list_occ.append(int(occ))\n list_pose.append(int(pose))\n list_inval.append(int(inval))\n anno_id = anno_id + 1\n\n file_id = file_id + 1\n num_annos_total += num_annos\n\n files = {'id': np.array(list_id), 'filename': list_filename }\n annos = {'file_id': np.array(list_file_id), 'anno_id': np.array(list_anno_id), \\\n 'x': np.array(list_x), 'y': np.array(list_y), \\\n 'w': np.array(list_w), 'h': np.array(list_h), \\\n 'blur': np.array(list_blur), 'expression': np.array(list_expr), \\\n 'illumination': np.array(list_illum), 'occlusion': np.array(list_occ), \\\n 'pose': np.array(list_pose), 'invalid': np.array(list_inval) }\n\n assert (len(list_id) == len(list_filename)), \\\n \"file_id and filename lists should have the same length\"\n\n self._num_annos = num_annos_total\n self._num_images = file_id\n\n return files, annos", "def _read_tags(dct_node):\n str_text = dct_node[ATT_TEXT]\n\n bln_mode = False\n str_point = str_text\n\n # and then digest all tags, right to left, eating terminal tags.\n str_s_point = str_point.rstrip()\n i_end = len(str_s_point)\n lst_keys = []\n lst_not_duplicate = []\n rgx_tag = re.compile(RGX_TP_TAG)\n lst_matches = [_ for _ in rgx_tag.finditer(str_s_point)]\n for o_match in lst_matches:\n str_key = o_match.group(2)\n # Valid key assignment ? 
or a duplicate ?\n if str_key not in lst_keys:\n lst_keys.append(str_key)\n var_value = o_match.group(3)\n if var_value != None: #treat simple keys as boolean flags\n dct_node[ATT_TAGS][str_key] = var_value\n else:\n dct_node[ATT_TAGS][str_key] = ''\n lst_not_duplicate.append(True)\n else:\n lst_not_duplicate.append(False)\n\n # and now shed any string of non-duplicate tags from the end\n for i in reversed(range(len(lst_matches))):\n o_match = lst_matches[i]\n if lst_not_duplicate[i]:\n if i_end == o_match.end():\n i_end = o_match.start()\n else:\n break\n else:\n break\n\n\n # store any keys in textual order,\n lng_keys = len(lst_keys)\n if lng_keys:\n if lng_keys > 1:\n dct_node[ATT_TAG_NAMES] = lst_keys\n else:\n dct_node[ATT_TAG_NAMES] = lst_keys\n # and assign any remaining text\n if bln_mode or lng_keys:\n dct_node[ATT_TEXT] = str_s_point[0:i_end]", "def convert_to_nbest_format(infname,outfname):\n with codecs.open(infname,'r','utf-8') as infile: \n with codecs.open(outfname,'w','utf-8') as outfile: \n for n,line in enumerate(iter(infile)):\n outfile.write( u'{} ||| {} ||| {} ||| {}\\n'.format( n, line.strip(), \n u'Distortion0= 0 LM0= 0 WordPenalty0= 0 PhrasePenalty0= 3 TranslationModel0= 0 0 0 0', u'0' ) )", "def tag_file_chooser(self):\n filename_list = tk.filedialog.askopenfilenames()\n self._tag_path_var.set(filename_list)", "def parse(self):\n with open(self.ofilename_pos, \"w\") as ofile_pos,\\\n open(self.ofilename_neg, \"w\") as ofile_neg:\n \n for (count, (code, header, sequence)) in enumerate(\n self.next_sequence()):\n # pick the right output file\n if code in self.codes: ofile = ofile_pos\n else: ofile = ofile_neg\n ofile.write(f\"{header}\\n{sequence}\\n\")", "def mainloop(sortedbedfile, par = {}):\n \n chrold = ''\n cluster = []\n plus = []\n wigfile = open(par['OUTFILE'], 'w')\n #outfilename=os.path.join(os.path.split(sortedbedfile)[0],os.path.split(sortedbedfile)[1].split('.bed')[0]+'.wig')\n #wigfile = open(outfilename, 'w')\n #wigfile = open(par['OUTFILE'], 'w')\n #wigfile=open(par['INFILE'].split('.')[0]+'_wd.wig','w')\n tag_thr = int(par['TAG_THR'])\n \n for line in open(sortedbedfile).xreadlines():\n if not line[:3] == 'chr':\n continue\n line = line.strip().split()\n if not chrlength[par['SPENAME']].has_key(line[0]):\n continue\n \n if chrold == '':\n chrold = line[0]\n print >>sys.stderr, 'Reading for', chrold, '......', time.asctime()\n if chrlength[par['SPENAME']][line[0]] % 10 == 0:\n cluster = [0] * (chrlength[par['SPENAME']][line[0]] / 10) # Tag number (10 bp space)\n plus = [0] * (chrlength[par['SPENAME']][line[0]] / 10) # Tag number in plus strand (10 bp space)\n else:\n cluster = [0] * (chrlength[par['SPENAME']][line[0]] / 10 + 1)\n plus = [0] * (chrlength[par['SPENAME']][line[0]] / 10 + 1)\n \n elif line[0] != chrold:\n print >>sys.stderr, 'Changing for', chrold, '......', time.asctime()\n package = {}\n package = change2postion(chrold, cluster, tag_thr) # package['position_file'], package['new_file']\n# change2postion_saveas_wig(wigfile,chrold, cluster)\n ##################################\n # test\n ##################################\n #print >>sys.stderr, 'writing for', chrold, '......', time.asctime()\n #filetmp = open(chrold + '.new', 'w')\n #for c in package['new_file']:\n # print >>filetmp, c\n #filetmp.close()\n #filepos = open(chrold + '.pos', 'w')\n #for p in package['position_file']:\n # print >>filepos, p\n #filepos.close()\n #peakregions = []\n #for m in 
open('/home/liulab/yzhang/ChIP_seq/num_pos/nucleosome_cluster_75/Nucleosome_chr1.cluster_10dec_wavelet.bed').xreadlines():\n # if m[:3] == 'chr':\n # peakregions.append(m.strip())\n ##################################\n \n ##################################\n # to be finished\n ##################################\n if par['WANT_DENOISE'] == 'yes':\n print >>sys.stderr, 'Denoising for', chrold, '......', time.asctime()\n denoised = denoiseChIPSeq(package['NEW_FILE'], package['POSITION_FILE'], par)\n else:\n denoised = package['NEW_FILE']\n \n print>>sys.stderr, 'Saving as a wig file', chrold, '......', time.asctime()\n change2wig(wigfile,package['POSITION_FILE'],denoised)\n\n package = {}\n peakregions = []\n peakregions_filtered = []\n ##################################\n chrold = line[0]\n print >>sys.stderr, 'Reading for', chrold, '......', time.asctime()\n \n if chrlength[par['SPENAME']][line[0]] % 10 == 0:\n cluster = [0] * (chrlength[par['SPENAME']][line[0]] / 10) # Tag number (10 bp space)\n plus = [0] * (chrlength[par['SPENAME']][line[0]] / 10) # Tag number in plus strand (10 bp space)\n else:\n cluster = [0] * (chrlength[par['SPENAME']][line[0]] / 10 + 1)\n plus = [0] * (chrlength[par['SPENAME']][line[0]] / 10 + 1)\n \n try:\n if line[5] == '+': # Tag in plus strand\n b = int(line[1]) + int(par['SHIFT'])\n e = b + int(par['EXTENSION'])\n if (max(b, 1) - 1) / 10 == 0:\n beginpos = max(b, 1)\n else:\n beginpos = (max(b, 1) / 10 + 1) * 10 + 1\n for k in xrange(beginpos, min(e, chrlength[par['SPENAME']][line[0]]), 10):\n cluster[(k - 1) / 10] += 1\n plus[(k - 1) / 10] += 1\n \n elif line[5] == '-': # Tag in minus strand\n e = int(line[2]) - int(par['SHIFT'])\n b = e - int(par['EXTENSION'])\n if (max(b, 1) - 1) / 10 == 0:\n beginpos = max(b, 1)\n else:\n beginpos = (max(b, 1) / 10 + 1) * 10 + 1\n for k in xrange(beginpos, min(e, chrlength[par['SPENAME']][line[0]]), 10):\n cluster[(k - 1) / 10] += 1\n else:\n continue\n except:\n print >> sys.stderr, 'Tag position file error: ', sys.exc_info()[0], sys.exc_info()[1]\n sys.exit()\n \n print >>sys.stderr, 'changing for', chrold, '......', time.asctime()\n package = change2postion(chrold, cluster, tag_thr) # package['position_file'], package['new_file']\n# change2postion_saveas_wig(wigfile,chrold, cluster)\n ##################################\n # test\n ##################################\n #print >>sys.stderr, 'writing for', chrold, '......', time.asctime()\n #filetmp = open(chrold + '.new', 'w')\n #for c in package['new_file']:\n # print >>filetmp, c\n #filetmp.close()\n #filepos = open(chrold + '.pos', 'w')\n #for p in package['position_file']:\n # print >>filepos, p\n #filepos.close()\n ##################################\n \n ##################################\n # to be finished\n ##################################\n if par['WANT_DENOISE'] == 'yes':\n print >>sys.stderr, 'Denoising for', chrold, '......', time.asctime()\n denoised = denoiseChIPSeq(package['NEW_FILE'], package['POSITION_FILE'], par)\n else:\n denoised = package['NEW_FILE']\n\n print>>sys.stderr, 'Saving as a wig file', chrold, '......', time.asctime()\n change2wig(wigfile,package['POSITION_FILE'],denoised)\n\n package = {}\n peakregions = []\n peakregions_filtered = []\n ##################################\n cluster = []\n plus = []\n wigfile.close()", "def intf_ENTCHTAG(E):\n if ( not inc.entid_or_LST_of_entids(E.The,3) or\n not inc.TXT(E,2) or not inc.TXT(E,1) ):\n print(\"Input Error: chtag\")\n print(intf_ENTCHTAG.__doc__)\n return # Without doing much of anything.\n 
refreshview= False # No need unless view attributes (@) have been affected.\n newtag= E.The.StackPop().val\n oldtag= E.The.StackPop().val\n myeids= E.The.StackPop().val\n if type(myeids)==type(list()):\n #myeids= map(lambda x:x.val, myeids) # Should now be a list of VALs.\n myeids= [x.val for x in myeids] # Should now be a list of VALs.\n else:\n myeids= [ myeids ] # Also a (1 item) list of ints.\n for myeid in myeids:\n if myeid in MMEL.El: # Check if eid exists.\n if MMEL.El[myeid].has_tag(oldtag):\n print(\"Untagging entity #%d with tag ''%s''\" % (myeid,oldtag))\n MMEL.El[myeid].del_tag(oldtag)\n MMEL.El[myeid].add_tag(newtag)\n if '@' in oldtag or '@' in newtag: refreshview= True\n else:\n print(\"Warning: No entity #%d. Skipping.\" % myeid)\n if refreshview: OUT.default(MMEL,E) # AUTODUMP ", "def Template(Fenetre_largeur,Fenetre_hauteur):\r\n li= Select_ligne(\"Nombre de lignes: \",Fenetre_largeur,Fenetre_hauteur)\r\n nom=\"Template\"\r\n fich=\"Template\"\r\n version=0\r\n while Path(\"stages/\"+fich+\".txt\").is_file() == True:\r\n version+=1\r\n fich=nom+str(version)\r\n fichier=open(\"stages/\"+fich+\".txt\",'w')\r\n fichier.write(str(li))\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n for i in range(li):\r\n for j in range(10):\r\n fichier.write(\"0,0|\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"gauche: resistance, droite: bonus\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"resistance max: 3\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"6=barre+\")\r\n fichier.write(\"\\n\")\r\n fichier.write(\"7=score+\")\r\n fichier.close()", "def lees_inhoud(bestandsnaam):\r\n bestand = open(bestandsnaam)\r\n header = \"\"\r\n sequentie = \"\"\r\n for regel in bestand:\r\n if regel.startswith(\">\"):\r\n header += regel.rstrip()\r\n else:\r\n sequentie += regel.rstrip()\r\n return header, sequentie", "def execute(species_file_name = '../data_external/b_neat_species.txt', \r\n images_file_name = '../data_external/b_neat_images.txt', \r\n out_file_name = '../data_prepared/media_b_neat.txt', \r\n infile_encoding = 'utf16',\r\n outfile_encoding = 'utf16',\r\n field_separator = '\\t', \r\n row_delimiter = '\\r\\n'): # For windows usage.\r\n try:\r\n # Read species file and store taxonid:name in dictionary.\r\n # Header: id, hierarchy, species_name, author_year, last_modified\r\n speciesdict = {}\r\n speciesfile = codecs.open(species_file_name, mode = 'r', encoding = infile_encoding) \r\n # Iterate over rows in file.\r\n for rowindex, row in enumerate(speciesfile):\r\n if rowindex == 0: # First row is assumed to be the header row.\r\n headers = list(map(str.strip, row.split(field_separator)))\r\n # headers = list(map(str, headers))\r\n else:\r\n row = list(map(str.strip, row.split(field_separator)))\r\n # row = list(map(str, row))\r\n #\r\n speciesdict[row[0]] = row[2]\r\n speciesfile.close()\r\n # Create outdatafile.\r\n out = codecs.open(out_file_name, mode = 'w', encoding = outfile_encoding)\r\n # Header, define and print.\r\n outheader = ['Scientific name', 'Media id', 'Media type', 'User name', 'Sort order', \r\n 'Location', 'Latitude DD', 'Longitude DD', 'Media format', 'Date', \r\n 'Date added', 'Title', 'Description', 'Creator', 'Publisher', 'Contributor', 'Rights' \r\n# , 'Old mediaid'\r\n ]\r\n out.write(field_separator.join(outheader) + row_delimiter)\r\n # Open image file for reading.\r\n imagesfile = codecs.open(images_file_name, mode = 'r', encoding = infile_encoding) \r\n # Iterate over rows in file.\r\n for rowindex, row in 
enumerate(imagesfile):\r\n if rowindex == 0: # First row is assumed to be the header row.\r\n # Header: id, species_id, filename, users_id, sort_order, date_added, location, latitude, longitude, dc_title, dc_creator, dc_description, dc_publisher, dc_contributor, dc_date, dc_type, dc_format, dc_rights, last_modified\r\n pass\r\n else:\r\n row = list(map(str.strip, row.split(field_separator)))\r\n # row = list(map(str, row))\r\n #\r\n # 0 : id \r\n # 18 : last_modified\r\n scientificname = speciesdict[row[1]] # 1 : species_id\r\n mediaid = row[2] # 2 : filename\r\n mediatype = row[15] # 15 : dc_type\r\n username = row[3] # 3 : users_id # TODO: Convert to user name.\r\n sortorder = row[4] # 4 : sort_order \r\n location = row[6] # 6 : location \r\n latitude = row[7] # 7 : latitude \r\n longitude = row[8] # 8 : longitude \r\n mediaformat = row[16] # 16 : dc_format\r\n date = row[14] # 14 : dc_date \r\n date_added = row[5] # 5 : date_added \r\n title = row[9] # 9 : dc_title \r\n description = row[11] # 11 : dc_description \r\n creator = row[10] # 10 : dc_creator \r\n publisher = row[12] # 12 : dc_publisher \r\n contributor = row[13] # 13 : dc_contributor \r\n rights = row[17] # 17 : dc_rights\r\n # Temp: \r\n oldmediaid = mediaid # To be temporary used for name translation. \r\n \r\n # Replace mediaid by new name format.\r\n parts = mediaid.split('_')\r\n print('Mediaid OLD: ' + mediaid + ' ' + \r\n 'NEW: ' + scientificname + '_' + parts[-1])\r\n mediaid = scientificname + '_' + parts[-1] # Replace.\r\n \r\n # Create row.\r\n outrow = [scientificname, mediaid, mediatype, username, sortorder, \r\n location, latitude, longitude, \r\n mediaformat, date, date_added, \r\n title, description, creator, publisher, contributor, rights \r\n# , oldmediaid\r\n ] \r\n # Print row.\r\n out.write(field_separator.join(outrow) + row_delimiter) \r\n # \r\n imagesfile.close()\r\n out.close \r\n #\r\n except Exception as e:\r\n print(\"ERROR: Exception %s\" % (e.args[0]))\r\n print(\"ERROR: Script will be terminated.\")\r\n sys.exit(1)\r\n finally:\r\n pass", "def insert_text_in_file(file_path: pathlib.Path, tag: str, text: str) -> bool:\n lines: List[str] = []\n with file_path.open('r') as f:\n lines = f.readlines()\n for ii, line in enumerate(lines):\n if line.find(tag) >= 0:\n lines.insert(ii + 1, text)\n with file_path.open('w') as f:\n f.writelines(lines)\n return True\n return False" ]
[ "0.56019914", "0.55601597", "0.5558506", "0.55486673", "0.5365312", "0.5310506", "0.52868605", "0.5284282", "0.5265695", "0.5260303", "0.52433544", "0.523879", "0.52328277", "0.52186084", "0.52185684", "0.5216578", "0.51640344", "0.5162988", "0.51482356", "0.5122066", "0.51198757", "0.5108365", "0.5107683", "0.51070845", "0.50937676", "0.5070779", "0.506947", "0.5064073", "0.50607663", "0.5050024", "0.50447863", "0.50319046", "0.50117415", "0.5008788", "0.50077754", "0.5001915", "0.49968863", "0.49949825", "0.49728835", "0.49650952", "0.49606508", "0.4957004", "0.49541298", "0.4950325", "0.4934306", "0.4930839", "0.49262628", "0.49213418", "0.49151388", "0.48977023", "0.4896623", "0.48764738", "0.4874319", "0.48671427", "0.48509744", "0.4841312", "0.48382077", "0.48283765", "0.48278624", "0.48251978", "0.4822742", "0.4821185", "0.48141205", "0.48095858", "0.47941703", "0.47923064", "0.4791787", "0.4791351", "0.47909874", "0.4790788", "0.47879574", "0.47876734", "0.47868267", "0.47856197", "0.4783528", "0.47805697", "0.47783607", "0.47722998", "0.47720715", "0.47672886", "0.47669768", "0.4765143", "0.47593734", "0.4757972", "0.47467723", "0.47428587", "0.47409853", "0.47408977", "0.47388628", "0.4737699", "0.47308952", "0.473058", "0.47298765", "0.4728465", "0.47270367", "0.47232538", "0.47204393", "0.4719166", "0.47121304", "0.47098845" ]
0.56634676
0
Saves the changes to the file specified in newfile, or makes a copy of the original file (filename+'~') and saves the content to "filename"
def saveFile(self,newfile=None): if newfile == None: shutil.move(self.filename,self.filename+'~') self.handler = open(self.filename,'w') else: self.handler = open(newfile,'w') self.handler.writelines(self.content) self.handler.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def newfile(filename):\n # Open the new file for writing\n with open(filename, \"w\") as file:\n pass", "def renewFile(filename):\n\n\tfileRepo = repertoire + filename + extension # Position du fichier\n\n\t# Ouvre en ecriture et l'ecrase\n\t# La methode with ferme le fichier automatiquement\n\twith open(fileRepo, \"w\") as robFile:\n\t\trobFile.write(filename + \"\\n\") # Ecrit le nom du fichier au debut", "def overwrite_file(self):\n\n new_file = open(self.temp_filename, 'r')\n file = open(self.filename, 'w')\n file.writelines(new_file.readlines())\n new_file.close()\n file.close()\n os.remove(self.temp_filename)", "def copy_file(file_name, new_file_name):\n\n import os\n\n if not os.path.exists(file_name):\n raise FileNotFoundError\n\n with open(str(file_name), 'rb') as infile:\n with open(str(new_file_name), 'wb') as outfile:\n while True:\n buff = infile.read(10240)\n if buff:\n outfile.write(buff)\n else:\n break\n\n return", "def write_output_file(updated_file, file_path):\n orig_file = file_path + \".orig\"\n # remove an existion .orig file\n if os.path.isfile(orig_file):\n os.remove(orig_file)\n # rename the current file\n os.rename(file_path, orig_file)\n # write the new file\n with open(file_path, mode='w', encoding='utf-8', newline='') as file_out:\n for line in updated_file:\n file_out.write(line)", "def write_to_file(original_path, new_path):\n print(f\"[INFO]: Transform data from binary to text file {new_path}\")\n with open(new_path, mode='wt', encoding='utf-8') as new_file:\n with open(original_path, mode='rb') as original_file:\n for line in original_file:\n new_file.write(line.decode())", "def make_backup(file_name):\n copy2(file_name, file_name + '.bak')", "def encrypt_file(filename, key):\n # init fermet\n f = Fernet(key)\n with open(filename, \"rb\") as file:\n # read all file data\n file_data = file.read()\n # encrypt data\n encrypted_data = f.encrypt(file_data)\n # delete file\n remove(filename)\n # generate new filename\n new_filename = generate_new_filename(filename, key, True)\n # write the encrypted file\n with open(new_filename, \"wb\") as file:\n print(\"Encrypted: \" + new_filename)\n file.write(encrypted_data)\n\n return new_filename", "def replace_file(filename, contents):\n filename = path.join(PATH_ROOT, filename)\n filename_bak = \"%s.release.bak\" % filename\n os.rename(filename, filename_bak)\n with open(filename, \"w\") as out_file:\n out_file.write(\"\".join(contents))\n shutil.copymode(filename_bak, filename)\n os.remove(filename_bak)", "def save_uploaded_file(self, file, new_file_name):\n\n root_path = pathlib.Path(__file__).resolve().parents[1]\n\n filepath = os.path.join(root_path, FILE_DIR, new_file_name)\n\n data = file.read()\n\n with open(filepath, 'wb') as f:\n f.write(bytes(data))\n\n relative_filepath = os.path.join(\"/\", FILE_DIR, new_file_name)\n\n return relative_filepath", "def createBackup(self, filename):\n if (not os.path.isfile(filename + '.bak')) and os.path.isfile(filename):\n with open(filename + '.bak', 'wb') as bakf:\n with open(filename, 'rb') as oldf:\n bakf.write(oldf.read())\n print(filename + \" backed up\")", "def single_file_write(self, file_pointer, filename):\n temp_file = \"resources/temp_file\"\n\n file_pointer.seek(0)\n with open(temp_file, \"wb\") as output_file:\n shutil.copyfileobj(file_pointer, output_file)\n\n os.rename(temp_file, filename)\n log.info(\"Saved file: %s\", filename)", "def put_file(self, src_fname, dst_fname):\n dst_fname = os.path.normpath(dst_fname)\n 
self.mkdirs(os.path.dirname(dst_fname))\n self._put(src_fname, dst_fname)", "def write_file(self, filehandle, filename):\n filehandle.seek(0)\n backuppath = os.path.join(self.FTP_PATH, filename)\n self.ftp.storbinary('STOR ' + backuppath, filehandle)", "def putFile(self, filename):\n basename = os.path.basename(filename)\n fp = open(filename, 'rb')\n self.ftp.storbinary('stor ' + basename, fp)\n fp.close();", "def writeFile(self, filename):\n\n s = self.asString()\n if os.access(filename, os.F_OK):\n raise RuntimeError(\"file %s already exists -- not overwritten.\" % (filename))\n \n f = file(filename, \"w\")\n f.write(s)\n f.close()", "def cp_to_file(fn0, fn):\n\n # keep rewriting attributes\n shutil.copyfile(fn0, fn)", "def overwrite_original_file(self, value):\n self.__overwrite_original_file = value", "def rename_file(original, content_type, condo_name):\n condo_name = sanitize_filename(condo_name)\n original_file = os.path.join(DOWNLOAD_PATH, original)\n new_name = os.path.join(DOWNLOAD_PATH, content_type + \"\\\\\" + condo_name + \".txt\")\n extracted_file = os.path.join(DOWNLOAD_PATH, unzip_file(original_file))\n if os.path.exists(new_name):\n os.remove(new_name)\n os.renames(extracted_file, new_name)\n os.remove(original_file)", "def rename_file (self):\n\t\tassert self.__filename, \"Renaming could not complete because the new filename could not be determined, one or more needed arguments is empty!\"\n\t\tos.rename( self._file.path, self.__filename )\n\t\t\n\t\tif self.verbose and self.log :\tself.log.info( 'File renamed from %s to %s' % (self._file.path, self.__filename))", "def rename_file(path, old_name, new_name):\n \n old_file = os.path.join(path, old_name)\n new_file = os.path.join(path, new_name)\n os.rename(old_file, new_file)", "def copy_source(self, filename, new_filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n new_file_path = os.path.join(self.storage_path, new_filename)\n shutil.copyfile(file_path, new_file_path)", "def write(self, filename, text):\r\n self._call(\"-rm\", filename)\r\n with temporary_file() as fp:\r\n fp.write(text)\r\n fp.flush()\r\n return self._call('-copyFromLocal', fp.name, filename)", "def newfile(self) :\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\t\tglobal configurer\n\n\t\tfd,name = mkstemp(suffix='.blend')\n\t\tos.close(fd)\n\t\tself.name = name\n\t\tfd = open(name,'wb', configurer.get('ServerBufferSize'))\n\t\tself.fd = fd\n\t\tprint name\n\t\treturn 1", "def _set_filename(self, filename):\n tmp_file = '_'.join(filename.split())\n# new_file = new_file.replace(\"'\",\n# '_').replace('-',\n# '_').replace(' ',\n# '_').replace('(', '_').replace(')', '_')\n new_file = ''\n pathsep = os.path.sep \n if sys.platform == 'win32':\n pathsep = '/'\n for char in tmp_file:\n if char.isalnum() or char in ['.', '_', ':', pathsep, '-']:\n new_file += char\n try:\n shutil.copy(filename, new_file)\n except shutil.Error, err:\n msg = \"`%s` and `%s` are the same file\" % (filename, new_file)\n if str(err) == msg:\n pass\n else:\n raise err\n utils.ensure_file_exists(new_file)\n self._filename = new_file\n self._basename, self._ext = os.path.splitext(self._filename)", "def write_to_file(self, filename: str) -> None:", "def save_to_file(self, filename, overwrite=False):\n arg = \"x\"\n if overwrite:\n arg = \"w\"\n\n with open(filename, arg) as fd:\n self.save(fd)", "def copy_file(file, destination):\n with open(file, 'rb') as infile, open(destination, 'wb') as outfile:\n outfile.write(infile.read())", "def RenameFile(self, 
oldname: str, newname: str) -> None:\n ...", "def trash_file(file_to_trash, document_name) :\n dtpo_log('debug', \"trash_file file -> %s\", file_to_trash)\n\n source = Config.config.get_source_directory() + '/' + file_to_trash\n destination = Config.config.get_trash_directory() + '/' + document_name\n\n os.rename(source, destination)", "def encrypt_and_store_file(path_to_original_file):\n\t\toriginal_file_name, _ = os.path.splitext(path_to_original_file)\n\t\toutput_string = EncryptDecrypt.ascii_string_to_hex(EncryptDecrypt.file_to_string(path_to_original_file))\n\t\twith open(original_file_name+\".enc\", \"w+\") as save_file:\n\t\t\tsave_file.write(output_string)\n\t\tos.remove(path_to_original_file)", "def renameFile(oldPath,newPath,makeBack=False):\n if os.path.exists(newPath): \n if makeBack:\n backPath = newPath+'.bak'\n if os.path.exists(backPath):\n os.remove(backPath)\n os.rename(newPath,backPath)\n else:\n os.remove(newPath)\n os.rename(oldPath,newPath)", "def _overwrite(self, filename, s):\r\n if os.path.exists(self._html_dir): # Make sure we're not immediately after a clean-all.\r\n with open(os.path.join(self._html_dir, filename), 'w') as f:\r\n f.write(s)", "def save(annotation, new_filename, original_path):\n \n destination = \"../../standardized-data/\"\n if os.path.isdir(destination + \"/\" + annotation) == False:\n os.mkdir(destination + \"/\" + annotation)\n print(annotation, \"FOLDER CREATED\")\n if os.path.exists(destination + \"/\" + annotation + \"/\" + new_filename):\n print('FILE EXISTS: DOUBLE CHECK FOR DUPLICATION :', new_filename)\n else:\n shutil.copyfile(original_path, destination + \"/\" + annotation + \"/\" + new_filename)\n return", "def fileRenameandReplace(filename,newfilename):\n try:\n os.rename(filename,newfilename)\n logging.info(\"Json file renamed in PD path\")\n except Exception as er:\n print (\"Not able to rename the json file \")\n return False", "def filewrite(self, filename):\n io.write(self, filename)", "def copy_rename_file(source_file_path: str, target_dir: str, new_name: str) -> str:\n shutil.copy2(source_file_path, target_dir)\n target_path = os.path.join(target_dir, os.path.basename(source_file_path))\n new_file_name = new_name + get_extension(source_file_path)\n new_file_path = os.path.join(target_dir, new_file_name)\n os.rename(target_path, new_file_path)\n return new_file_path", "def _autoconfig_backup_file(filename):\n\n # Does a copy of the file exist, if not create one\n ofile = filename + \".orig\"\n (ret, stdout, stderr) = VPPUtil.exec_command(\"ls {}\".format(ofile))\n if ret != 0:\n logging.debug(stderr)\n if stdout.strip(\"\\n\") != ofile:\n cmd = \"sudo cp {} {}\".format(filename, ofile)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)", "def copyfile(source, dest, newname=None):\n\n if not os.path.exists(source):\n #print 'no such file %s' %source\n return False\n shutil.copy(source, newname)\n dest = os.path.join(dest, newname)\n if os.path.exists(dest):\n os.remove(dest)\n shutil.move(newname, dest)\n return True", "def putFile(self, _src, _dst, delExisting = True):\n\n #-------------------- \n # Delete existing _dst from XNAT host.\n #-------------------- \n if delExisting:\n r = self.__httpsRequest('DELETE', _dst)\n #print(\"%s Uploading\\nsrc: '%s'\\n_dst: '%s'\"%(_src, _dst))\n\n\n\n #-------------------- \n # Clean '_dst' string and endcode\n #-------------------- \n _dst = Xnat.path.makeXnatUrl(self.host, _dst)\n _dst = str(_dst).encode('ascii', 'ignore')\n\n\n\n 
#-------------------- \n # Put the file in XNAT using the internal '__httpsRequest'\n # method.\n #-------------------- \n with open(_src, 'rb') as f:\n response = self.__httpsRequest('PUT', _dst, files={'file': f}, \n headers={'Content-Type': 'application/octet-stream'}, stream=True)\n\n return response", "def hacerBackup(self, rutaOrigen, archivo, rutaDestino):\n try:\n shutil.copy2(archivo, rutaDestino)\n print \"Backup realizado exitosamente!\"\n except Exception, e:\n print \"hacerBackup - Error >> \", e", "def touch(file_name):\n open(file_name, 'a').close()", "def duplicate_file():\n file = TEST_CONTENT_REPO / PACKS_DIR / \"Sample01\" / TEST_PLAYBOOKS_DIR / \"playbook-sample_test1.yml\"\n new_file = TEST_CONTENT_REPO / PACKS_DIR / \"Sample02\" / TEST_PLAYBOOKS_DIR / \"playbook-sample_test1.yml\"\n try:\n copyfile(file, new_file)\n yield\n finally:\n new_file.unlink()", "def rewrite_all_file(self, data):\r\n with open(self.file_name, 'w', encoding='utf-8') as self.file:\r\n self.file.write(data)", "def writefile(filename, content):\n with open(Path(os.path.expanduser(filename)), 'w') as outfile:\n outfile.write(content)", "def replace_file(new_content, current_location):\r\n\tif should_replace(new_content, current_location):\r\n\t\tabs_path = os.path.abspath(current_location)\r\n\t\tcurrent_dir, filename = os.path.split(abs_path)\r\n\t\ttmp_filename = '{0}.{1}'.format(filename, time.time())\r\n\t\ttmp_path = os.path.join(current_dir, tmp_filename)\r\n\r\n\t\ttry:\r\n\t\t\twith open(tmp_path, 'w') as tmp:\r\n\t\t\t\ttmp.write(new_content.getvalue())\r\n\t\t\tos.rename(tmp_path, abs_path)\t\r\n\t\texcept IOError:\r\n\t\t\tprint('Failed to replace ''{0}'''.format(abs_path), file=sys.stderr)\r\n\t\t\treturn False\r\n\t\treturn True\r\n\treturn False", "def renew_photo(file):\n dummy, temp_filename = tempfile.mkstemp()\n file.save(temp_filename)\n # Compute filename\n hash_txt = sha256sum(temp_filename)\n dummy, suffix = os.path.splitext(file.filename)\n hash_filename_basename = hash_txt + suffix\n hash_filename = os.path.join(\n insta485.app.config[\"UPLOAD_FOLDER\"],\n hash_filename_basename\n )\n # Move temp file to permanent location\n shutil.move(temp_filename, hash_filename)\n insta485.app.logger.debug(\"Saved %s\", hash_filename_basename)\n return hash_filename_basename", "def rename_file(old_path, new_path):\n if os.path.exists(new_path):\n raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST),\n old_path, new_path)\n os.rename(old_path, new_path)", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def download_to_filename(self, filename):\n copyfile(self.name, filename)", "def new_filename(original_filename, new_locale):\r\n f = path(original_filename)\r\n new_file = f.parent.parent.parent / new_locale / f.parent.name / f.name\r\n return new_file.abspath()", "def overwrite_original_file(self):\n return self.__overwrite_original_file", "def mv(self, source: str, filename: str) -> None:\n\n self.cp(source, filename)\n self.rm(source)", "def CopyFileTo(self, filename): # real signature unknown; restored from __doc__\n pass", "def newFile(self):\n self.open_file_name = None\n self.ui.main_edit.setText(\"\")\n self.saveEnabled(False)", "def debz(oldfn, newfn):\n if os.path.isfile(newfn):\n print(\"Error: refusing to overwrite existing file '%s'\" % (newfn, ))\n return\n output = open(newfn, 'wb')\n fobj = open(oldfn, 'rb')\n\n output.write(fobj.read(24))\n while True:\n sz = struct.unpack('>L', fobj.read(4))[0]\n chunk = fobj.read(sz)\n if not chunk:\n 
break\n output.write(bz2.decompress(chunk))\n # unsure of this\n if sz != len(chunk):\n break\n\n output.close()", "def file_name(self, new_file_name):\n self._file_name = os.path.abspath(new_file_name).replace(\"\\\\\", \"/\")", "def write_if_diff(filename, contents, verbose=True):\n if not os.path.isfile(filename):\n existing = None\n else:\n with open(filename, \"r\") as f:\n existing = f.read()\n if contents == existing:\n if verbose:\n print(filename + \" generated is the same as existing file, skipping.\")\n return\n with open(filename, \"w\") as f:\n if verbose:\n print(\"Writing\", filename)\n f.write(contents)", "def makeBackup(self):\n #--File Path\n original = self.path\n #--Backup\n backup = self.path+'.bak'\n shutil.copy(original,backup)\n #--First backup\n firstBackup = self.path+'.baf'\n if not os.path.exists(firstBackup):\n shutil.copy(original,firstBackup)", "def replace_file(new_content, current_location):\n\tif should_replace(new_content, current_location):\n\t\tabs_path = os.path.abspath(current_location)\n\t\tcurrent_dir, filename = os.path.split(abs_path)\n\t\ttmp_filename = '{0}.{1}'.format(filename, time.time())\n\t\ttmp_path = os.path.join(current_dir, tmp_filename)\n\n\t\ttry:\n\t\t\twith open(tmp_path, 'w') as tmp:\n\t\t\t\ttmp.write(new_content.getvalue())\n\t\t\tos.rename(tmp_path, abs_path)\t\n\t\texcept IOError:\n\t\t\tprint('Failed to replace ''{0}'''.format(abs_path), file=sys.stderr)\n\t\t\treturn False\n\t\treturn True\n\treturn False", "def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def ecrire_fichier(nom_du_fichier,texte) :\r\n with open(nom_du_fichier,\"w\") as fichier : #j'ouvre le fichier en mode w => write\r\n fichier.write(texte) #j'ecris la chaine dans le fichier\r", "def __concatonate_files(self, new_file_name, parent_folder):\n\n # make the output directory\n output_file = self.save_directory + \"/\" + new_file_name\n\n # check if save_directory exists\n if not os.path.exists(self.save_directory):\n try:\n # make the directory\n os.makedirs(self.save_directory)\n except PermissionError:\n # if the user is unable to write to this directory, we should not continue\n print(\"You do not have the correct permissions for creating a directory here. 
Please try again.\")\n exit(-1)\n\n barcode_files = []\n for root, directory, files in os.walk(parent_folder):\n # we need to know where each file is in the barcode folder so we can read data from it\n for name in files:\n barcode_files.append( os.path.join(root, name) )\n\n with open(output_file, 'w') as writer:\n for name in barcode_files:\n with open(name, 'r') as reader:\n for line in reader:\n writer.write(line)", "def inplace(filename, mode='r', buffering=-1, encoding='utf-8', errors=None,\n newline=None, backup_extension=None):\n # from Martijn Pieters blog\n\n # move existing file to backup, create new file with same permissions\n # borrowed extensively from the fileinput module\n if set(mode).intersection('wa+'):\n raise ValueError('Only read-only file modes can be used')\n\n backupfilename = filename + (backup_extension or os.extsep + 'bak')\n try:\n os.unlink(backupfilename)\n except os.error:\n pass\n os.rename(filename, backupfilename)\n readable = io.open(backupfilename, mode, buffering=buffering,\n encoding=encoding, errors=errors, newline=newline)\n try:\n perm = os.fstat(readable.fileno()).st_mode\n except OSError:\n writable = open(filename, 'w' + mode.replace('r', ''),\n buffering=buffering, encoding=encoding, errors=errors,\n newline=newline)\n else:\n os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC\n if hasattr(os, 'O_BINARY'):\n os_mode |= os.O_BINARY\n fd = os.open(filename, os_mode, perm)\n writable = io.open(fd, \"w\" + mode.replace('r', ''), buffering=buffering,\n encoding=encoding, errors=errors, newline=newline)\n try:\n if hasattr(os, 'chmod'):\n os.chmod(filename, perm)\n except OSError:\n pass\n try:\n yield readable, writable\n except Exception:\n # move backup back\n try:\n os.unlink(filename)\n except os.error:\n pass\n os.rename(backupfilename, filename)\n raise\n finally:\n readable.close()\n writable.close()\n try:\n os.unlink(backupfilename)\n except os.error:\n pass", "def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()", "def write(self, filename):\n assert filename[-3:]=='.fz','name must end in .fz'\n\n files.makedir_fromfile(filename)\n\n ucfilename=filename[0:-3]\n bname = os.path.basename(ucfilename)\n\n tmp_path = os.path.join(\n files.get_temp_dir(),\n bname,\n )\n files.makedir_fromfile(tmp_path)\n\n with TempFile(tmp_path) as tfile:\n super(CosmosMEDSMaker,self).write(tfile.path)\n self._compress_meds_file(tfile.path, filename)", "def updateFile(filename, content):\n\tfilename = adaptPath(filename)\n\tif filename != None:\n\t\ttry:\n\t\t\toldContent = open(filename, \"r\").read()\n\t\texcept IOError:\n\t\t\toldContent = \"\"\n\t\tif oldContent != content:\n\t\t\tfile = open (filename, \"w\")\n\t\t\tfile.write(content)\n\t\t\tfile.close()\n\treturn content", "def simple_copy():\n src, des = rem('grab')\n # All we need to do is keep the filename same\n # Since the file is of 0 bytes\n des_name = os.path.basename(des)\n des_dir = os.path.dirname(des)\n\n des = os.path.join(des_dir, des_name)\n\n # Now simply open and close des in write mode\n TEMP_STREAM = open(des, 'w')\n TEMP_STREAM.close()\n\n return True", "def createFile(self, fileName):\n\n with open(fileName, \"w+\") as f:\n return f", "def create_new_file(\n filename=\"default.txt\", filepath=\"default_path\", os_file=True, suffix=\".txt\"\n):\n\n count = 1\n if filepath == \"default_path\":\n filepath = \"\"\n elif filepath == \"\":\n pass\n else:\n filepath += 
\"/\"\n\n filename = filename.split(\".\")[0]\n\n # First check if Filename already exists, if so, add a counter to the file.\n if os.path.isfile(os.path.abspath(filepath + filename + suffix)):\n l.warning(\"Warning filename \" + str(filename) + \" already exists!\")\n filename = filename + \"_\" + str(count) # Adds suffix to filename\n while os.path.isfile(\n os.path.abspath(filepath + filename + suffix)\n ): # checks if file exists\n count += 1\n countlen = len(str(count))\n filename = filename[:-countlen] + str(count)\n l.info(\"Filename changed to \" + filename + \".\")\n\n filename += str(suffix)\n if os_file:\n fp = os.open(\n os.path.abspath(filepath + filename), os.O_WRONLY | os.O_CREAT\n ) # Creates the file\n else:\n fp = open(os.path.abspath(filepath + filename), \"w\")\n\n l.info(\"Generated file: \" + str(filename))\n\n return fp, count", "def CopyFile(path, file, new):\n\n if not path.endswith((\"/\", \"\\\\\")):\n path = path + \"\\\\\"\n\n shutil.copy(path + file, path + new)\n return 0", "def write_file(filename, data):\n file = open(filename, \"a\")\n file.write(data)\n file.close()", "def rename_file(source, oldname, newname):\n #source = client_variables.output_folder\n renamefiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for renamefile in renamefiles:\n if renamefile.endswith(ext):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)\n elif renamefile.startswith(oldname):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)", "def _restore_file(file):\n\n os.remove(file)\n os.rename(file + '.bak', file)", "def saveUploadedRecipe(self, file):\r\n filename = str(file)\r\n with open(os.path.join(main.settings.RECIPE_DIR, filename), 'wb+') as destination:\r\n for chunk in file.chunks():\r\n destination.write(chunk)", "def write_file(filename):\r\n if Py3:\r\n return open(filename, \"w\", newline='')\r\n return open(filename, \"wb\")", "def save(self):\n if self.hasChanged:\n filePath = self.path\n tempPath = filePath+'.tmp'\n fileDir = os.path.split(filePath)[0]\n if not os.path.exists(fileDir): os.makedirs(fileDir)\n cPickle.dump(self.data,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)\n self.hasChanged = False", "def save_file(file_name: str, data: str) -> None:\n dir_path: str = create_dir(dir_name='out')\n file_path: str = os.path.join(dir_path, file_name)\n if os.path.isfile(file_path):\n LOGGER.warning(f'{file_path} already exists. 
Will be overwritten...')\n with open(file_path, 'w') as file:\n file.write(data)\n LOGGER.info(f'saved {file_name}')", "def make_backup(filename, html):\n\n with open(filename, 'wb') as f:\n f.write(html)", "def put(self, src, dst):\r\n abs_src = os.path.expanduser(src)\r\n assert os.path.exists(abs_src), 'File does not exist, cannot copy: %s' % abs_src\r\n return self._do_put(abs_src, dst)", "def new_file(self):\r\n self.filename = QFileDialog.getSaveFileName(\r\n None, 'Title', '', 'TXT (*.txt)'\r\n )\r\n if self.filename[0]:\r\n self.currentfile = open(self.filename[0], 'w')\r\n (self.base_name, self.ext) = os.path.splitext(self.filename[0])\r\n self.FilePath.setText(self.filename[0])", "def rename_file_content(topdir, src, dst):\n\n print 'begin rename file content from %s to %s' % (src, dst)\n backup_file_ext = 'xxx_sed_backup_file_001'\n os.popen('find %s -type file | xargs sed -i %s -e \"s/%s/%s/g\"' % (topdir, backup_file_ext, src, dst)).read()\n #mac OSX does not support sed without backup\n #so we remove those\n os.popen('find %s -name \"*%s\" | xargs rm -f' % (topdir, backup_file_ext)).read()", "def saveAs(self, newPath=None):\n if newPath is None:\n # ask user for new file name/path\n pass", "def rename_file(fname):\n x,y = load_file(fname)\n date=y[0].split(\".\")\n if len(y[2])<20:\n title=y[2]\n else:\n title=y[2][0:20]\n title=title.replace(\" \",\"_\")\n \n new_name=\"{}{}{}{}.csv\".format(date[2],date[1],date[0],title)\n new_appendix=rename_appendix(y[10],new_name)\n os.rename(fname,new_name)\n replace_line(new_name,10,'Anhang;\"{}\"'.format(new_appendix))\n return new_name", "def Add_File(self,txn,filename,newcontents):\n opid = self.new_opid()\n fullname = os.path.join(self.home,filename)\n #if not self.tx.dir_exists(os.path.dirname(fullname)):\n # raise OSError(errno.ENOENT,\"No directory: %r\"%os.path.dirname(fullname))\n xaction = ReplaceAll_Operation(fullname,newcontents,opid)\n self._add_operation(txn,xaction)", "def filewrite(self, filename, data):\n try:\n filedata = data.decode(\"utf-8\")\n except Exception:\n filedata = data\n lock = FileLock(filename)\n lock.acquire()\n with open(filename, 'w+') as f:\n f.write(filedata)\n lock.release()", "def spit(filename, contents):\n with open(filename, 'w') as file:\n file.write(contents)", "def case_convert_file_to_file(source_path: str, dest_path: str, style: CaseStyleEnum) -> None:\n with open(source_path, 'r') as f:\n contents = f.read()\n new_contents = case_convert_stream(contents, style)\n with open(dest_path, 'w') as f:\n f.write(new_contents)", "def replace_in_file(path, old, new):\n with open(path) as fp:\n content = fp.read()\n\n lpf.ensure_removed(path)\n with open(path, 'w') as fp:\n fp.write(content.replace(old, new))", "def write_file(self):\n file = open(self.__file_path, 'w+')\n file.truncate(0)\n file.write(self.__content)\n file.close()", "def rename_file(source, destination, alog):\n\n # Some error checking against a legitimate source & destination.\n if not type(source) is str:\n raise CoreError('Source is not of str type.')\n elif not type(destination) is str:\n raise CoreError('Destination is not of str type.')\n elif not os.path.isfile(source):\n raise CoreError(source + ' is not a valid file.')\n\n head, tail = os.path.split(destination)\n if not os.path.isdir(head + '/'):\n try:\n os.makedirs(head + '/')\n except:\n raise CoreError('Failed to create new directory: '\n + (head + '/'))\n\n for i in range(0, len(MuzikArkive.illegal_name_characters)):\n if MuzikArkive.illegal_name_characters[i] in 
tail:\n tail = tail.replace(MuzikArkive.illegal_name_characters[i], '_')\n alog.rlog = MuzikArkive.illegal_name_characters[i] \\\n + ' was removed from ' + destination\n\n if not os.path.isfile(destination):\n try:\n os.rename(source, destination)\n except:\n raise CoreError('os.rename() Failed.')\n else:\n head, tail = destination.rsplit('.', 1)\n rname = True\n i = 1\n while rname:\n addon = '[' + str(i) + '].'\n if not os.path.isfile(head + addon + tail):\n try:\n os.rename(source, (head + addon + tail))\n except:\n raise CoreError('os.rename() Failed.')\n else:\n rname = False\n else:\n i += 1", "def rotate_file(cls, main_dir, temp_dir):\n\t\tif(os.path.isfile(main_dir)):\n\t\t\tos.remove(main_dir)\n\t\tcopyfile(temp_dir, main_dir)\n\t\tos.remove(temp_dir)", "def save(self, filename):\n pass", "def decrypt_file(filename, key):\n f = Fernet(key)\n with open(filename, \"rb\") as file:\n # read the encrypted data\n encrypted_data = file.read()\n # decrypt data\n decrypted_data = f.decrypt(encrypted_data)\n # delete file\n remove(filename)\n # generate new filename\n new_filename = generate_new_filename(filename, key, False)\n # write the encrypted file\n with open(new_filename, \"wb\") as file:\n print(\"Decrypted: \" + new_filename)\n file.write(decrypted_data)\n\n return new_filename", "def save_original(self, filename):\n return self.form.save_original(filename)", "def save_original(self, filename):\n return self.form.save_original(filename)", "def writeFile(file_name, file_text, mode='w+'):\n with open(file_name, mode) as file:\n file.write(file_text)", "def clear_file(filename):\n with open(filename, 'w'):\n pass", "def create_file():\r\n with open(fileName.strftime(\"%Y-%m-%d-%H-%M\")+\".txt\",\"w\") as file:\r\n file.write(\"\")" ]
[ "0.68023175", "0.6601903", "0.6470307", "0.6256966", "0.60828876", "0.60267633", "0.59504694", "0.5871682", "0.5786605", "0.5738598", "0.57240206", "0.5673847", "0.5622948", "0.5617441", "0.5615462", "0.55986905", "0.55614066", "0.55208856", "0.551383", "0.55105835", "0.55076134", "0.55026776", "0.55018353", "0.5500804", "0.549015", "0.5486434", "0.54844224", "0.54761565", "0.5466248", "0.54579103", "0.54556406", "0.5404251", "0.5396911", "0.53866905", "0.53797394", "0.537571", "0.5336119", "0.53117996", "0.53072125", "0.5291559", "0.52886355", "0.52818", "0.5279859", "0.52659523", "0.5263569", "0.52558595", "0.52446616", "0.5239447", "0.52360374", "0.52360374", "0.5230879", "0.52239776", "0.5217382", "0.5214899", "0.521078", "0.5207715", "0.52048796", "0.52047664", "0.5195475", "0.519512", "0.5189479", "0.5163264", "0.5158702", "0.5156134", "0.51552325", "0.5155105", "0.51519454", "0.51513237", "0.51504546", "0.51479995", "0.5147727", "0.51444376", "0.5139108", "0.5129438", "0.5120123", "0.51153374", "0.5113804", "0.5110432", "0.5107919", "0.5107354", "0.5097431", "0.50926954", "0.5088416", "0.50795376", "0.50713545", "0.50656086", "0.5065506", "0.50653875", "0.50601804", "0.5054336", "0.50515634", "0.5049564", "0.50345856", "0.503406", "0.5032511", "0.50307006", "0.50307006", "0.5029084", "0.50276273", "0.50258034" ]
0.68237215
0
"summary" is a system-generated summary. "references" is a list of human-made reference summaries.
def score_summary(self, summary, references, summary_id='A'): try: self._write_config(references, Doc(summary_id, summary)) output = self._run_rouge() output = output.decode("utf-8") return self._parse_output(output) except CalledProcessError as e: print("Rouge returned a non-zero error code. Output was: ", file=sys.stderr) print("BEGIN OUTPUT ", file=sys.stderr) print(e.output, file=sys.stderr) print("END OUTPUT", file=sys.stderr) raise e finally: self._cleanup()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary):\n\n self._summary = summary", "def summary(self, summary: str):\n return self.swag({\n 'summary': summary\n })", "def print_summary_and_genomes(summary, genome):\n for sample in summary:\n for ref in summary[sample]:\n if ref == \"metadata\":\n continue\n final = {\n \"sample_identifier\": sample,\n \"reference_organism\": ref,\n \"metadata\": summary[sample][\"metadata\"]\n }\n if genome.get(sample) and genome[sample].get(ref):\n final.update({\"status\": \"complete\"})\n # Add summary statistics\n final.update(summary[sample][ref])\n # Add genomic sequences\n final.update(genome[sample][ref])\n else:\n final.update({\"status\": \"notMapped\"})\n print(json.dumps(final))", "def orders_summary(self, orders_summary):\n\n self._orders_summary = orders_summary", "def errors_summary(self, errors_summary):\n\n self._errors_summary = errors_summary", "def summaries(self, summaries):\n if summaries is None:\n raise ValueError(\"Invalid value for `summaries`, must not be `None`\")\n\n self._summaries = summaries", "def _summary(obj):\n return obj.summary", "def summary(self, summary):\n if summary is None:\n raise ValueError(\"Invalid value for `summary`, must not be `None`\") # noqa: E501\n if summary is not None and len(summary) < 1:\n raise ValueError(\"Invalid value for `summary`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._summary = summary", "def summary_lines(self, summary_lines):\n\n self._summary_lines = summary_lines", "def _process_references0(self, references):\n if \"zarr_consolidated_format\" in references:\n # special case for Ike prototype\n references = _unmodel_hdf5(references)\n self.references = references", "def test_get_brief_summary_output(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n # Assert expected results \n self.assertEqual(resource.get_brief_summary(), \"Delillo's White \"\\\n \"Noise follows narrator Jack Gladney, a professor \"\\\n \"at a \\nsmall Liberal Arts college and describes an \"\\\n \"academic year. 
Jack teaches \\nat ...\")", "def show_summary(self, lang):\n return self.summary % self.vars", "def _forward_summary(self, summaries):\n p = self.params\n for summary_key, summary_value in summaries.items():\n logging.info((summary_key, summary_value))\n summary_type = base_layer.get_summary_type_from_key(summary_key)\n assert summary_value.shape[0] == p.num_stages\n if p.unpack_summaries:\n # unstack summary_value\n unstacked_values = jnp.split(summary_value, p.num_stages)\n for i, v in enumerate(unstacked_values):\n base_layer.add_summary(f'{summary_key}/{i}', v, summary_type)\n else:\n base_layer.add_summary('{summary_key}', summary_value, summary_type)", "def printSummary(self):\n pass", "def references(self, references):\n\n self._references = references", "def add_summary(self, collections=None):\n\n name = self.group + '/' + self.name\n print(\"Add summary for \"+name)\n\n if self.stype == 0:\n self._placeholder = tf.placeholder(tf.float32, name=name)\n tf.summary.scalar(name, self._placeholder, collections=[collections])\n elif self.stype == 1:\n self._placeholder = tf.placeholder(\n tf.float32, shape=[None, None], name=name)\n tf.summary.image(name, self._placeholder, collections=[collections])\n elif self.stype == 2:\n self._placeholder = tf.placeholder(tf.float32, shape=[None], name=name)\n tf.summary.histogram(name, self._placeholder, collections=[collections])\n elif self.stype == 3:\n self._placeholder = tf.placeholder(tf.float32, name=name)\n tf.summary.scalar(name, self._placeholder, collections=[collections])\n if self._log:\n self._plot_summary = PlotSummaryLog(\n self.name, self.group, collections=[collections])\n else:\n self._plot_summary = PlotSummaryStandard(\n self.name, self.group, collections=[collections])\n elif self.stype == 4:\n self._plot_summary = PlotSummaryPlot(\n self.name, self.group, collections=[collections])\n elif self.stype == 5:\n self._placeholder = tf.placeholder(tf.float32, name=name)\n self._plot_summary = PlotSummaryImages(self.name, self.group, collections=[collections])\n else:\n raise ValueError('Wrong summary type')", "def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)", "def _forward_summary(self, summaries):\n p = self.params\n for summary_key, summary_value in summaries.items():\n logging.info((summary_key, summary_value))\n summary_type = base_layer.get_summary_type_from_key(summary_key)\n assert summary_value.shape[0] == p.x_times\n if p.unpack_summaries:\n # unstack summary_value\n unstacked_values = jnp.split(summary_value, p.x_times)\n for i, v in enumerate(unstacked_values):\n base_layer.add_summary(f'{summary_key}/{i}', v, summary_type)\n else:\n base_layer.add_summary('{summary_key}', summary_value, summary_type)", "def add_ref_tag(basicSeqs):\r\n\r\n formattedBasicSeqs=list(basicSeqs) \r\n for record in formattedBasicSeqs:\r\n record.id=record.id+'_Ref'\r\n record.name=record.name+'_Ref'\r\n record.description=record.description+'_Ref'\r\n return 
formattedBasicSeqs", "def rouge_l_summary_level(evaluated_sentences, reference_sentences):\n if len(evaluated_sentences) <= 0 or len(reference_sentences) <= 0:\n raise ValueError(\"Collections must contain at least 1 sentence.\")\n\n # total number of words in reference sentences\n m = len(_split_into_words(reference_sentences))\n\n # total number of words in evaluated sentences\n n = len(_split_into_words(evaluated_sentences))\n\n union_lcs_sum_across_all_references = 0\n for ref_s in reference_sentences:\n union_lcs_sum_across_all_references += _union_lcs(evaluated_sentences,\n ref_s)\n return _f_p_r_lcs(union_lcs_sum_across_all_references, m, n)", "def summary(self, i):\n return self.__summaries[i]", "def summary(self):\r\n return '%s%s: %s%s %s%s' % (BLUE, self.title,\r\n GREEN, self.description,\r\n NORMAL, self.link)", "def _print_summary(case, summary):\n for dof, data in summary.items():\n b4b = data[\"Bit for Bit\"]\n conf = data[\"Configurations\"]\n stdout = data[\"Std. Out Files\"]\n print(\" \" + case + \" \" + str(dof))\n print(\" --------------------\")\n print(\" Bit for bit matches : \" + str(b4b[0]) + \" of \" + str(b4b[1]))\n print(\" Configuration matches : \" + str(conf[0]) + \" of \" + str(conf[1]))\n print(\" Std. Out files parsed : \" + str(stdout))\n print(\"\")", "def summarize(self):\n\n def increment_summary(summary_obj, case_obj):\n \"\"\"increment ReportSummary count was ReportCase status\n\n Whatever the status of the case object, the corresponding property\n will be incremented by 1 in the summary object\n\n Args:\n summary_obj (ReportSummary): summary object to increment\n case_obj (ReportCase): case object\n \"\"\"\n summary_obj.increment(case_obj.get_status())\n\n summary = ReportSummary()\n [increment_summary(summary, case) for case in self.cases]\n self.summary = summary", "def replaces_summary(self):\n return False", "def get_summary(self, s, base=None):\n summary = summary_patt.search(s).group()\n if base is not None:\n self.params[base + \".summary\"] = summary\n return summary", "def get_summary(self, s, base=None):\n summary = summary_patt.search(s).group()\n if base is not None:\n self.params[base + '.summary'] = summary\n return summary", "def publish_summary(self, jobs):\n pass", "def summary(self):\n if hasattr(self,\"_summary\"):\n return self._summary\n else:\n return {}", "def combine_summary_and_sets(sequence_read_set_file, summary_file):\n summaries = {}\n with open(summary_file) as f:\n for line in f:\n summary = json.loads(line)\n sample = summary.pop(\"sample\")\n reference_organism = summary.pop(\"reference\").lower()\n if summaries.get(sample):\n if summaries[sample].get(reference_organism):\n summaries[sample][reference_organism][\"summary_stats\"] = summary\n else:\n summaries[sample].update({reference_organism: {\n \"summary_stats\": summary\n }})\n else:\n summaries[sample]={reference_organism: {\"summary_stats\": summary}}\n f.close()\n with open(sequence_read_set_file) as f:\n for line in f:\n read_set = json.loads(line)\n sample = read_set.pop(\"sample\")\n read_set[\"urls\"] = read_set.pop(\"urls\")\n if summaries.get(sample):\n summaries[sample].update({'metadata': read_set})\n else:\n continue\n return summaries", "def _showMostReferenced(self):\n self._console_output(\"Calculating most referenced functions...\")\n self.table_label.setText(\"Most referenced functions\")\n\n most_referenced = self.ba.most_referenced_functions()\n\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels((\"Address\", 
\"References\", \"Name\"))\n self.table.clearContents()\n self.table.setRowCount(0)\n\n idx = 0\n\n # Fill with contents\n for f_ea, (ref_nr, ref_name) in most_referenced:\n\n self.table.insertRow(idx)\n addr_item = QTableWidgetItem(\"%x\" % f_ea)\n addr_item.setFlags(addr_item.flags() ^ QtCore.Qt.ItemIsEditable)\n ref_item = cw.NumQTableWidgetItem(\"%d\" % ref_nr)\n name_item = QTableWidgetItem(ref_name)\n\n self.table.setItem(idx, 0, addr_item)\n self.table.setItem(idx, 1, ref_item)\n self.table.setItem(idx, 2, name_item)\n\n idx += 1", "def _get_summary(self):\n\n logger.warning('_get_summary() has been deprecated since 3.6.4. '\n 'Use the summary decorator instead')\n return self.summary", "def summary_string(self) -> str:", "def display_summary( id, extra_text='' ):\n s = Summaries[ id ]\n \n title = ( s.title if s.title[-1]!='.' else s.title[:-1] )\n title = title[:150].rstrip() + ('' if len(title)<=150 else '...')\n if s.doi!='':\n title = '<a href=http://dx.doi.org/%s>%s</a>' % (s.doi, title)\n \n authors = ', '.join( s.authors[:5] ) + ('' if len(s.authors)<=5 else ', ...')\n \n lines = [\n title,\n authors,\n str(s.year),\n '<small>id: %d%s</small>' % (id, extra_text)\n ] \n print '<blockquote>%s</blockquote>' % '<br>'.join(lines)", "def test_description_with_ref() -> None:\n soup = generate_case(\"description_with_ref\")\n\n tests.html_schema_doc_asserts.assert_descriptions(\n soup, [\"We should see this\", \"inner description\", \"We should see this too\"]\n )", "def doc_summary(lines):\n summary = []\n for line in lines:\n stripped = line.strip().lower()\n if (stripped.startswith('to use this normalizer') or\n stripped.startswith('use ``method')):\n continue\n if (line.startswith('Parameters') or line.startswith('Example')\n or line.startswith('.. 
note::')):\n break\n summary.append(line)\n return summary", "def summary(self):\n raise NotImplementedError", "def test_summary(self):\n self.db.upload(\"pkg1-0.3a2.tar.gz\", BytesIO(b\"test1234\"), \"pkg1\", \"0.3a2\")\n self.db.upload(\"pkg1-1.1.tar.gz\", BytesIO(b\"test1234\"), \"pkg1\", \"1.1\")\n p1 = self.db.upload(\n \"pkg1a2.tar.gz\", BytesIO(b\"test1234\"), \"pkg1\", \"1.1.1a2\", \"summary\"\n )\n p2 = self.db.upload(\n \"pkg2.tar.gz\", BytesIO(b\"test1234\"), \"pkg2\", \"0.1dev2\", \"summary\"\n )\n summaries = self.db.summary()\n self.assertCountEqual(\n summaries,\n [\n {\n \"name\": \"pkg1\",\n \"summary\": \"summary\",\n \"last_modified\": p1.last_modified.replace(tzinfo=UTC),\n },\n {\n \"name\": \"pkg2\",\n \"summary\": \"summary\",\n \"last_modified\": p2.last_modified.replace(tzinfo=UTC),\n },\n ],\n )", "def references(df, sort='rank', ascending=True, output='list', limit=-1):\n \n ref_counts = list(df['references'].value_counts().items())\\\n\n if output == 'string':\n ref_string = \"\"\n if sort == 'rank':\n if ascending:\n ref_number = 1\n else:\n ref_number = len(ref_counts) + 1\n for ref_pair in ref_counts:\n if (ascending == True) and (limit != -1) and (ref_number > limit):\n break\n elif (ascending == False) and (limit != -1) and (ref_number < (len(ref_counts) + 1 - limit)):\n break\n ref_string += '--------\\n'\n ref_string += f\"Most common reference {ref_number}\\n\"\n ref_string += '\\n'\n source_number = 1\n for ref in ref_pair[0]:\n ref_string += f\"Source number: {source_number}\\n\"\n ref_string += f\"Title: {ref['title']}\\n\"\n ref_string += f\"Author: {ref['author']}\\n\"\n ref_string += f\"Publisher: {ref['publisher']}\\n\"\n ref_string += f\"Publisher Location: {ref['publisherLocation']}\\n\"\n ref_string += f\"Year: {ref['year']}\\n\"\n ref_string += f\"Reference Type: {ref['refType']}\\n\"\n ref_string += f\"Reference Key: {ref['key']}\\n\"\n ref_string += '\\n'\n source_number += 1\n ref_string += f\"Total Occurrences: {ref_pair[1]}\\n\"\n ref_string += '--------\\n'\n ref_string += '\\n'\n if ascending: \n ref_number += 1\n else:\n ref_number -= 1\n elif sort == 'alphabet':\n # TODO: alphabetical sorting\n pass\n else:\n raise ValueError(f\"Invalid parameter for sort '{sort}' - Accepted values are 'rank' or 'alphabet'\")\n return ref_string\n elif output == 'list':\n return ref_counts\n else:\n raise ValueError(f\"Invalid parameter for output '{output}' - Accepted values are 'list' or 'string'\")", "def quotes_summary(self, quotes_summary):\n\n self._quotes_summary = quotes_summary", "def _add_summary_reports(cnf, general_section, bcbio_structure):\n base_dirpath = dirname(bcbio_structure.multiqc_fpath)\n\n recs = []\n if bcbio_structure.is_rnaseq:\n recs = add_rna_summary_records(cnf, recs, general_section, bcbio_structure, base_dirpath)\n else:\n recs = add_dna_summary_records(cnf, recs, general_section, bcbio_structure, base_dirpath)\n # recs.append(_make_url_record(bcbio_structure.targqc_summary_fpath, general_section.find_metric(SEQQC_NAME), base_dirpath))\n # if verify_dir(bcbio_structure.flagged_regions_dirpath, is_critical=False):\n # url_val = OrderedDict(\n # [(region_type, join(bcbio_structure.flagged_regions_dirpath, 'flagged_' + region_type + '.html'))\n # for region_type in ['target', 'exons']])\n # rec = _make_url_record(url_val, general_section.find_metric(ABNORMAL_NAME), base_dirpath)\n # recs.append(rec)\n return recs", "def add_summary(self, summary):\n if isinstance(summary, six.binary_type):\n summary = 
tf.Summary.FromString(summary)\n assert isinstance(summary, tf.Summary), type(summary)\n for val in summary.value:\n if val.WhichOneof('value') == 'simple_value':\n val.tag = re.sub('tower[p0-9]+/', '', val.tag) # TODO move to subclasses\n suffix = '-summary' # issue#6150\n if val.tag.endswith(suffix):\n val.tag = val.tag[:-len(suffix)]\n self.stat_holder.add_stat(\n val.tag, val.simple_value,\n self.global_step, self.epoch_num)\n self.summary_writer.add_summary(summary, get_global_step_value())", "def test_rr_summary(results):\n # pylint: disable=unidiomatic-typecheck\n test_result = results.summary()\n assert type(test_result).__name__ == \"Summary\"\n assert type(test_result.tables) == list\n assert len(test_result.tables) == 3\n assert len(test_result.extra_txt) > 0", "def solutionsummary(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.solutionsummary(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def summary_line_and_description():", "def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed", "def clear_summary(self):\n self._summary.clear()", "def test_summaries(self):\n try:\n ans = str(self.model)\n except:\n assert False, \"Model __repr__ failed.\"\n\n try:\n print(self.model)\n except:\n assert False, \"Model print failed.\"\n\n try:\n self.model.summary()\n except:\n assert False, \"Model summary failed.\"", "def getSummary(self):\n return self.base.get(\"summary\", [])", "def set_references(self, references: IReferences):\n self.__references2 = references\n super(StatusRestService, self).set_references(references)\n self.__context_info = self._dependency_resolver.get_one_optional(\"context-info\")", "def solutionsummary(self,whichstream_):\n res = __library__.MSK_XX_solutionsummary(self.__nativep,whichstream_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def test_description_with_ref_link_to_reused_ref() -> None:\n soup = generate_case(\"description_with_ref\", GenerationConfiguration(link_to_reused_ref=False))\n\n tests.html_schema_doc_asserts.assert_descriptions(\n soup, [\"We should see this\", \"inner description\", \"We should see this too\", \"inner description\"]\n )", "def clearSummary(self):\n self.summary(DiagnosticStatus.OK, '')", "def print_summaries(summaries):\n\n for method, summary in summaries:\n print(method)\n print('')\n print(summary)\n print('')", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def summary(self):\n return self._summary", "def _parse_summary(self):\r\n if self._is_at_section():\r\n return\r\n\r\n summary = self._doc.read_to_next_empty_line()\r\n summary_str = \" \".join([s.strip() for s in summary]).strip()\r\n if re.compile('^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$').match(summary_str):\r\n self['Signature'] = summary_str\r\n if not self._is_at_section():\r\n self['Summary'] = self._doc.read_to_next_empty_line()\r\n else:\r\n self['Summary'] = summary\r\n\r\n if not self._is_at_section():\r\n self['Extended Summary'] = self._read_to_next_section()", "def _parse_summary(self):\r\n if self._is_at_section():\r\n return\r\n\r\n summary 
= self._doc.read_to_next_empty_line()\r\n summary_str = \" \".join([s.strip() for s in summary]).strip()\r\n if re.compile('^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$').match(summary_str):\r\n self['Signature'] = summary_str\r\n if not self._is_at_section():\r\n self['Summary'] = self._doc.read_to_next_empty_line()\r\n else:\r\n self['Summary'] = summary\r\n\r\n if not self._is_at_section():\r\n self['Extended Summary'] = self._read_to_next_section()", "def print_summary(metrics_list, labels_list):\n for metric, name in zip(metrics_list, labels_list):\n print('*' * 108)\n print(name)\n mean_inc_acc = []\n for i in range(metric.shape[0]):\n print('\\t', end='')\n for j in range(metric.shape[1]):\n print('{:5.2f}% '.format(100 * metric[i, j]), end='')\n if np.trace(metric) == 0.0:\n if i > 0:\n avg = 100 * metric[i, :i].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n else:\n avg = 100 * metric[i, :i + 1].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n print()\n print()\n\n # Computing AIA across all incremental states (thus excluding the first non-incremental state)\n print('\\tMean Incremental Acc.: {:5.2f}%'.format(np.mean(mean_inc_acc[1:])))\n print('*' * 108)", "def test_references() -> None:\n soup = generate_case(\"references\")\n\n tests.html_schema_doc_asserts.assert_property_names(\n soup,\n [\n \"a_gift\",\n \"file_prefix\",\n \"anchor_with_slash\",\n \"propertyA\",\n \"anchor_no_slash\",\n \"anchor_nested_reference\",\n \"same_file_anchor_with_slash\",\n \"same_file_anchor_no_slash\",\n \"same_file_nested_reference\",\n \"other_file_anchor\",\n \"with_wrap\",\n \"other_file_dot_anchor\",\n \"other_file_dot_dot_anchor\",\n \"other_file_only\",\n \"not_a_string\",\n \"multi_hierarchy_reference\",\n \"propertyA\",\n ],\n )\n tests.html_schema_doc_asserts.assert_descriptions(\n soup,\n [\n \"Testing $ref\",\n \"A gift, or is it?\",\n \"A gift, or is it?\",\n \"Description for object_def/items/propertyA\",\n \"Description for array_def\",\n \"Description for string_def\",\n \"The delivery is a gift, no prices displayed\",\n \"The delivery is a gift, no prices displayed\",\n \"The delivery is a gift, no prices displayed\",\n \"Test schema with a not\",\n \"Contents of propertyA in final.json\",\n ],\n )\n tests.html_schema_doc_asserts.assert_types(\n soup,\n [\n \"object\", # root\n \"string\", # a_gift\n \"string\", # file_prefix\n \"object\", # anchor_with_slash\n \"string\", # anchor_with_slash -> propertyA\n \"array of string\", # anchor_no_slash\n \"string\", # anchor_no_slash items\n \"string\", # anchor_nested_reference\n \"string\", # same_file_anchor_with_slash\n \"object\", # same_file_anchor_no_slash\n \"string\", # same_file_nested_reference\n \"object\", # other_file_anchor\n \"boolean\", # other_file_anchor -> with_wrap\n \"object\", # other_file_dot_anchor\n \"object\", # other_file_dot_dot_anchor\n \"object\", # other_file_only\n \"string\", # not_a_string, not\n \"object\", # multi_hierarchy_reference\n \"string\", # multi_hierarchy_reference -> propertyA\n ],\n )", "def addReferences(self, reference, service_uids):\n addedanalyses = []\n wf = getToolByName(self, 'portal_workflow')\n bsc = getToolByName(self, 'bika_setup_catalog')\n bac = getToolByName(self, 'bika_analysis_catalog')\n ref_type = reference.getBlank() and 'b' or 'c'\n ref_uid = reference.UID()\n postfix = 1\n for refa in reference.getReferenceAnalyses():\n grid = refa.getReferenceAnalysesGroupID()\n try:\n cand = int(grid.split('-')[2])\n if 
cand >= postfix:\n postfix = cand + 1\n except:\n pass\n postfix = str(postfix).zfill(int(3))\n refgid = 'I%s-%s' % (reference.id, postfix)\n for service_uid in service_uids:\n # services with dependents don't belong in references\n service = bsc(portal_type='AnalysisService', UID=service_uid)[0].getObject()\n calc = service.getCalculation()\n if calc and calc.getDependentServices():\n continue\n ref_uid = reference.addReferenceAnalysis(service_uid, ref_type)\n ref_analysis = bac(portal_type='ReferenceAnalysis', UID=ref_uid)[0].getObject()\n\n # Set ReferenceAnalysesGroupID (same id for the analyses from\n # the same Reference Sample and same Worksheet)\n # https://github.com/bikalabs/Bika-LIMS/issues/931\n ref_analysis.setReferenceAnalysesGroupID(refgid)\n ref_analysis.reindexObject()\n\n # copy the interimfields\n calculation = service.getCalculation()\n if calc:\n ref_analysis.setInterimFields(calc.getInterimFields())\n\n # Comes from a worksheet or has been attached directly?\n ws = ref_analysis.getBackReferences('WorksheetAnalysis')\n if not ws or len(ws) == 0:\n # This is a reference analysis attached directly to the\n # Instrument, we apply the assign state\n wf.doActionFor(ref_analysis, 'assign')\n addedanalyses.append(ref_analysis)\n\n self.setAnalyses(self.getAnalyses() + addedanalyses)\n\n # Initialize LatestReferenceAnalyses cache\n self.cleanReferenceAnalysesCache()\n\n # Set DisposeUntilNextCalibrationTest to False\n if (len(addedanalyses) > 0):\n self.getField('DisposeUntilNextCalibrationTest').set(self, False)\n\n return addedanalyses", "def test_summary(self):\n\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH,\n \"valid_defaults_with_toc_and_citations.yaml\",\n )\n ]\n\n settings = get_settings(\n PANDOC_DEFAULT_FILES=pandoc_default_files,\n FORMATTED_FIELDS=FORMATTED_FIELDS,\n )\n pandoc_reader = PandocReader(settings)\n\n source_path = os.path.join(\n TEST_CONTENT_PATH, \"valid_content_with_citation.md\"\n )\n\n _, metadata = pandoc_reader.read(source_path)\n\n self.assertEqual(\n (\n \"<p>But this foundational principle of science has now been\"\n \" called into question by\"\n ' <a href=\"https://www.britannica.com/science/string-theory\">'\n \"String Theory</a>.</p>\\n\"\n ),\n str(metadata[\"summary\"]),\n )", "def record_summary(self, name, value, tags=None):\n identity = self.create_identity(name, tags, \"summary\")\n with self._lock:\n if identity in self._batch:\n merged_value = self._batch[identity]\n merged_value[\"count\"] += 1\n merged_value[\"sum\"] += value\n merged_value[\"min\"] = min(value, merged_value[\"min\"])\n merged_value[\"max\"] = max(value, merged_value[\"max\"])\n else:\n value = {\"count\": 1, \"sum\": value, \"min\": value, \"max\": value}\n self._batch[identity] = value", "def print_summary(self):\n self.model.summary()", "def get_references_using_handler(block, stream, references):\n block_handler = get_block_handler(block)\n references.update(block_handler.get_object_references(stream))\n return stream", "def get_refs(func):\n found = re.findall(\"References:(.*)\", func.__doc__, flags=re.DOTALL)\n if any(found):\n ref = \" \".join([s.strip() for s in found[0].split(\"\\n\")]).strip()\n return ref\n else:\n return \"\"", "def summary(self) -> str:\n pass", "def build_summary(self, summary_dict):\n for key, value in summary_dict.items():\n tf.summary.scalar(key, value)\n merged_op = tf.summary.merge_all()\n return merged_op", "def parse_summary(self, content):\n summary_list = 
content.xpath('.//div[@id=\"summaryfield\"]//text()')\n summary = ''\n for text in summary_list:\n summary += normalize_string(text)\n return summary", "def test_refs(self):\n sid = h5s.create_simple((10,10))\n self.assertEqual(h5i.get_ref(sid), 1)\n \n h5i.inc_ref(sid)\n self.assertEqual(h5i.get_ref(sid), 2)\n\n h5i.dec_ref(sid)\n self.assertEqual(h5i.get_ref(sid), 1)", "def test_summary(self):\n self.db.upload(\"pkg1-0.3a2.tar.gz\", BytesIO(b\"test1234\"), \"pkg1\", \"0.3a2\")\n self.db.upload(\"pkg1-1.1.tar.gz\", BytesIO(b\"test1234\"), \"pkg1\", \"1.1\")\n p1 = self.db.upload(\n \"pkg1a2.tar.gz\", BytesIO(b\"test1234\"), \"pkg1\", \"1.1.1a2\", \"summary\"\n )\n p2 = self.db.upload(\n \"pkg2.tar.gz\", BytesIO(b\"test1234\"), \"pkg2\", \"0.1dev2\", \"summary\"\n )\n summaries = self.db.summary()\n self.assertCountEqual(\n summaries,\n [\n {\"name\": \"pkg1\", \"summary\": \"summary\", \"last_modified\": ANY},\n {\"name\": \"pkg2\", \"summary\": \"summary\", \"last_modified\": ANY},\n ],\n )\n # Have to compare the last_modified fuzzily\n self.assertEqual(\n summaries[0][\"last_modified\"].utctimetuple(),\n p1.last_modified.utctimetuple(),\n )\n self.assertEqual(\n summaries[1][\"last_modified\"].utctimetuple(),\n p2.last_modified.utctimetuple(),\n )", "def rank_summaries(self, summary):\n summary_split = summary.split(\"@ highlight\")\n\n embedding_index = self.get_word_embeddings()\n sentence_vectors = []\n # get word count vector for each sentence\n for sentence in summary_split:\n words = nltk.word_tokenize(sentence)\n mean_vector_score = sum([embedding_index.get(\n word, np.zeros((100,))) for word in words])/len(words)\n sentence_vectors.append(mean_vector_score)\n\n # similarity matrix\n sim_matrix = self.get_similarity_matrix(sentence_vectors)\n # graph of matrix - retrieve a set of scores based on page rank algorithm\n pageRank_scores = self.get_graph(sim_matrix)\n # rank sentences based off scores and extract top one as the chosen sentence for training\n sent_scores = [(pageRank_scores[i], sent)\n for i, sent in enumerate(summary_split)]\n sent_scores = sorted(sent_scores, reverse=True)\n chosen_summary = sent_scores[0][1]\n return(chosen_summary)", "def test_summary_can_be_updated(self):\n # Create and add a summary to our test report\n summary, _ = CommentAndSummary.objects.get_or_create(note='summary', is_summary=True)\n self.report.internal_comments.add(summary)\n\n data = self.report_data.copy()\n new_summary = 'newest summary'\n data.update({'summary': new_summary, 'summary_id': summary.id})\n form = ReportEditForm(data, instance=self.report)\n\n self.assertTrue(form.is_valid())\n self.assertTrue('summary' in form.changed_data)\n\n form.save()\n\n self.report.refresh_from_db()\n summary.refresh_from_db()\n self.assertEqual(self.report.get_summary.note, new_summary)\n self.assertEqual(summary.note, new_summary)", "def load_references(self, collections, item):", "def summary(self, checkid):\r\n return summary.Summary(self, checkid)", "def test_xref(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.nap.Base.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n for item in data['items']:\n if item['uid'] == 'example.nap.Base.ref':\n self.assertEqual(\n item['seealsoContent'],\n 'Depends on @example.example.Foo Relative reference on @example.nap.Base.foo'\n )", "def print_workflow_summary(workflow_stats ):\n\t# status\n\tworkflow_stats.set_job_filter('nonsub')\n\t# Tasks\n\ttotal_tasks = 
workflow_stats.get_total_tasks_status()\n\ttotal_succeeded_tasks = workflow_stats.get_total_succeeded_tasks_status()\n\ttotal_failed_tasks = workflow_stats.get_total_failed_tasks_status()\n\ttotal_unsubmitted_tasks = total_tasks -(total_succeeded_tasks + total_failed_tasks)\n\ttotal_task_retries = workflow_stats.get_total_tasks_retries()\n\ttotal_invocations = total_succeeded_tasks + total_failed_tasks + total_task_retries\n\t# Jobs\n\ttotal_jobs = workflow_stats.get_total_jobs_status()\n\ttotal_succeeded_jobs = workflow_stats.get_total_succeeded_jobs_status()\n\ttotal_failed_jobs = workflow_stats.get_total_failed_jobs_status()\n\ttotal_unsubmitted_jobs = total_jobs - (total_succeeded_jobs + total_failed_jobs )\n\ttotal_job_retries = workflow_stats.get_total_jobs_retries()\n\ttotal_job_instance_retries = total_succeeded_jobs + total_failed_jobs + total_job_retries\n\t# Sub workflows\n\tworkflow_stats.set_job_filter('subwf')\n\ttotal_sub_wfs = workflow_stats.get_total_jobs_status()\n\ttotal_succeeded_sub_wfs = workflow_stats.get_total_succeeded_jobs_status()\n\ttotal_failed_sub_wfs = workflow_stats.get_total_failed_jobs_status()\n\ttotal_unsubmitted_sub_wfs = total_sub_wfs - (total_succeeded_sub_wfs + total_failed_sub_wfs)\n\ttotal_sub_wfs_retries = workflow_stats.get_total_jobs_retries()\n\ttotal_sub_wfs_tries = total_succeeded_sub_wfs + total_failed_sub_wfs + total_sub_wfs_retries\n\n\t# tasks\n\tsummary_str = \"\"\n\tsummary_str += \"total_succeeded_tasks: \" + convert_to_str(total_succeeded_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_failed_tasks: \" + convert_to_str(total_failed_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_unsubmitted_tasks: \" + convert_to_str(total_unsubmitted_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_tasks: \" + convert_to_str(total_tasks)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_task_retries: \" + convert_to_str(total_task_retries)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_invocations: \" + convert_to_str(total_invocations)\n\tsummary_str += NEW_LINE_STR\n\n\n\tsummary_str += \"total_succeeded_jobs: \" + convert_to_str(total_succeeded_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_failed_jobs: \" + convert_to_str(total_failed_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_unsubmitted_jobs: \" + convert_to_str(total_unsubmitted_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_jobs:\" + convert_to_str(total_jobs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_job_retries: \" + str(total_job_retries)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_job_instance_retries:\" + convert_to_str(total_job_instance_retries)\n\tsummary_str += NEW_LINE_STR\n\n\n\tsummary_str += \"total_succeeded_sub_wfs: \" + convert_to_str(total_succeeded_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_failed_sub_wfs: \" + convert_to_str(total_failed_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_unsubmitted_sub_wfs: \" + convert_to_str(total_unsubmitted_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_sub_wfs: \" + convert_to_str(total_sub_wfs)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_sub_wfs_retries: \" + str(total_sub_wfs_retries)\n\tsummary_str += NEW_LINE_STR\n\tsummary_str += \"total_sub_wfs_tries: \" + convert_to_str(total_sub_wfs_tries)\n\tsummary_str += NEW_LINE_STR\n\n\tworkflow_states_list = workflow_stats.get_workflow_states()\n\tworkflow_wall_time = 
stats_utils.get_workflow_wall_time(workflow_states_list)\n\n\tif workflow_wall_time is None:\n\t\tsummary_str += \"workflow_runtime: -\"\n\telse:\n\t\tsummary_str += \"workflow_runtime: %-20s (total %d seconds)\" % \\\n\t\t\t\t(format_seconds(workflow_wall_time), (workflow_wall_time))\n\tsummary_str += NEW_LINE_STR\n\tworkflow_cum_job_wall_time = workflow_stats.get_workflow_cum_job_wall_time()[0]\n\tif workflow_cum_job_wall_time is None:\n\t\tsummary_str += \"cumulative_workflow_runtime_kickstart: -\"\n\telse:\n\t\tsummary_str += \"cumulative_workflow_runtime_kickstart: %-20s (total %d seconds)\" % \\\n\t\t\t(format_seconds(workflow_cum_job_wall_time),workflow_cum_job_wall_time)\n\tsummary_str += NEW_LINE_STR\n\tsubmit_side_job_wall_time = workflow_stats.get_submit_side_job_wall_time()[0]\n\tif submit_side_job_wall_time is None:\n\t\tsummary_str += \"cumulative_workflow_runtime_dagman: -\"\n\telse:\n\t\tsummary_str += \"cumulative_workflow_runtime_dagman: %-20s (total %d seconds)\" % \\\n\t\t\t(format_seconds(submit_side_job_wall_time), submit_side_job_wall_time)\n\treturn summary_str", "def test_update_summary(cards_db):\n i = cards_db.add_card(Card(\"foo\", owner=\"me\", state=\"done\"))\n cards_db.update_card(i, Card(summary=\"bar\", state=None))\n\n mod = cards_db.get_card(i)\n assert mod == Card(\"bar\", owner=\"me\", state=\"done\")", "def show_summary_help(self):\n QMessageBox.question(self, 'Summarization help', get_summarization_help(),\n QMessageBox.Ok | QMessageBox.NoButton)", "def summary(self):\n res = \", \".join(\n elem[\"summary\"] for elem in self.status[\"health\"][\"summary\"]\n )\n if res:\n return res\n elif self.detail:\n return self.detail[0]\n return \"\"", "def getSummary(self):\n return self.summary", "def test_references(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.example.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n # Test that references are properly put at the top-level\n self.assertTrue(\n 'references' in data\n )\n # Check reference parent\n self.assertEqual(\n data['references'][0]['parent'],\n 'example.example'\n\n )", "def fix_references(self) -> None:\n\n\t\tself.log_info(\"--- Searching for references to variants ---\")\n\t\tfor publication in Publication.objects.filter(variant_of__isnull=False):\n\t\t\tvariant = publication.variant_of\n\t\t\torigs = PublicationReference.objects.filter(reference=publication)\n\t\t\tfor orig in origs:\n\t\t\t\tif PublicationReference.objects.filter(reference=variant, publication=orig.publication).exists():\n\t\t\t\t\tcontinue\n\t\t\t\tfixed = PublicationReference(\n\t\t\t\t\treference=variant,\n\t\t\t\t\tpublication=orig.publication,\n\t\t\t\t\tidentifier=('' if orig.identifier is None else orig.identifier) + \"*\",\n\t\t\t\t)\n\t\t\t\ttry:\n\t\t\t\t\tfixed.full_clean()\n\t\t\t\t\tfixed.save()\n\t\t\t\t\tself.log_success(f\"Added reference: {publication} -- {fixed.identifier} -> {variant}\")\n\t\t\t\texcept ValidationError as e:\n\t\t\t\t\traise CommandError(f\"{publication} -- {fixed.identifier} -> {variant}: {e}\")", "async def setIncident_summary(\n self, eventID: str, incidentNumber: int, summary: str, author: str\n ) -> None:", "def edit_summary():\n with open(\"docs/SUMMARY.md\", \"r\") as opened_file:\n summary = opened_file.readlines()\n\n with open(\"docs/SUMMARY.md\", \"w\") as opened_file:\n for line in summary:\n if not any(ext in line for ext in ignore_list):\n opened_file.write(line)", "def build_reference(sets_to_reference):\n\n number_to_uuid = {}\n 
card_reference = {}\n name_to_uuid = {}\n uuid_to_number = {}\n\n print(\"- Building internal Card Reference -\")\n for setName in tqdm(sets_to_reference) :\n # Fix 1 on WIN systems since CON.json is reserved :\n if setName == 'CON':\n setName = 'CON_'\n # End Fix 1\n with open(ROOT_DIR + 'data/sets/' + setName + '.json') as f:\n # Fix 2 on WIN systems since CON.json is reserved :\n if setName == 'CON_':\n setName = 'CON'\n # End Fix 2\n data = json.load(f)\n name_to_uuid[setName]= {} \n number_to_uuid[setName]= {}\n card_reference[setName]= {}\n uuid_to_number[setName]= {}\n for item in data['data']['cards']:\n #print(item)\n number_to_uuid[setName][item['number']] = item['uuid']\n name_to_uuid[setName][item['name']] = item['uuid']\n uuid_to_number[setName][item['uuid']] = item['number']\n foreignName = {}\n for languageData in item['foreignData']:\n if languageData['language'] == 'Spanish' and 'ES' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'ES'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'French' and 'FR' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'FR'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'German' and 'DE' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'DE'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Italian' and 'IT' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'IT'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Portuguese' and 'PT' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'PT'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Japanese' and 'JP' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'JP'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Korean' and 'KO' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'KO'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Russian' and 'RU' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'RU'\n foreignName[language] = languageData['name']\n elif languageData['language'] == 'Chinese' and 'ZH' in languages_to_reference:\n name_to_uuid[setName][languageData['name']] = item['uuid']\n language = 'ZH'\n foreignName[language] = languageData['name']\n card_reference[setName][item['uuid']] = {'name' : item['name'],\n 'colorIdentity' : item['colorIdentity'],\n 'convertedManaCost' : item['convertedManaCost'],\n 'legalities' : item['legalities'],\n 'foreignName' : foreignName,\n 'number' : item['number'],\n 'rarity' : item['rarity'],\n 'setCode' : item['setCode'],\n 'subtypes' : item['subtypes'],\n 'supertypes' : item['supertypes'],\n 'types' : item['types'],\n 'uuid' : item['uuid'] }\n try :\n card_reference[setName][item['uuid']]['keywords'] = item['keywords']\n except :\n pass\n try :\n card_reference[setName][item['uuid']]['power'] = item['power']\n except :\n pass\n try :\n card_reference[setName][item['uuid']]['toughness'] = item['toughness']\n except :\n pass \n try :\n card_reference[setName][item['uuid']]['manaCost'] = item['manaCost']\n except :\n pass\n # 
Token version of the set : setname is preceded by 'T' \n name_to_uuid['T'+setName]= {} \n number_to_uuid['T'+setName]= {}\n card_reference['T'+setName]= {}\n uuid_to_number['T'+setName]= {} \n for item in data['data']['tokens']:\n number_to_uuid['T'+setName][item['number']] = item['uuid']\n name_to_uuid['T'+setName][item['name']] = item['uuid']\n uuid_to_number['T'+setName][item['uuid']] = item['number']\n card_reference['T'+setName][item['uuid']] = {'name' : item['name'],\n 'colorIdentity' : item['colorIdentity'],\n 'convertedManaCost' : 0,\n 'number' : item['number'],\n 'setCode' : item['setCode'],\n 'subtypes' : item['subtypes'],\n 'supertypes' : item['supertypes'],\n 'types' : item['types'],\n 'uuid' : item['uuid'] }\n return (card_reference, name_to_uuid, number_to_uuid, uuid_to_number)", "def build_summary(self):\n for k, v in self.metrics.items():\n tf.summary.scalar(k, v)\n \n self.summary_op = tf.summary.merge_all()", "def test_summary(self):\n cache = DummyCache()\n cache.upload(\"pkg1-0.3.tar.gz\", BytesIO(b\"test1234\"))\n cache.upload(\"pkg1-1.1.tar.gz\", BytesIO(b\"test1234\"))\n p1 = cache.upload(\n \"pkg1a2.tar.gz\", BytesIO(b\"test1234\"), \"pkg1\", \"1.1.1a2\", \"summary\"\n )\n p2 = cache.upload(\n \"pkg2.tar.gz\", BytesIO(b\"test1234\"), \"pkg2\", \"0.1dev2\", \"summary\"\n )\n summaries = cache.summary()\n self.assertCountEqual(\n summaries,\n [\n {\n \"name\": \"pkg1\",\n \"summary\": \"summary\",\n \"last_modified\": p1.last_modified,\n },\n {\n \"name\": \"pkg2\",\n \"summary\": \"summary\",\n \"last_modified\": p2.last_modified,\n },\n ],\n )", "def references(md5):\n u = Upload.objects.filter(md5=md5).first()\n if not u:\n abort(404)\n # first, is this searchable?\n is_searchable = False\n count = elastic.count('page', filter={'md5': md5})\n if count > 0:\n is_searchable = True\n #annotations = Reference.objects.filter(upload=u, ref_url__exists=True)\n annotations = Reference.objects.filter(upload=u).order_by('ref_pos')\n # create a list of referenced things\n references = {'references':[], 'searchable': is_searchable}\n for a in annotations:\n try:\n references['references'].append({\n 'pos_x': a.pos_x, \n 'pos': a.pos, \n 'ref': a.ref_upload.md5, \n 'ref_pos': a.ref_pos\n })\n except:\n pass\n return jsonify(references)", "def test_summary_registred(self, mock_get, mock_get_categories):\n\n # We mock ai_list\n mock_get.return_value = {'ai_list': []}\n response = self.client.get(reverse('studio:summary'))\n self.assertEqual(response.status_code, 200)", "def _showStringXrefs(self):\n\n # Retrieve some config values\n show_misc.entropy = self.config.calculate_misc.entropy\n show_unique_s = self.config.display_unique_strings\n\n self._console_output(\"Calculating string references...\")\n\n self.ba.calculate_strings_list()\n s_ref_list = self.ba.get_string_references()\n\n # Found any references at all?\n nrows = len(s_ref_list)\n if not nrows:\n self._console_output(\"[!] 
No string references found\", err = True)\n return\n\n if show_misc.entropy:\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels((\"Address\", \"String\", \"misc.entropy\"))\n\n else:\n self.table.setColumnCount(2)\n self.table.setHorizontalHeaderLabels((\"Address\", \"String\"))\n\n self.table_label.setText(\"String references in current function\")\n self.table.clearContents()\n self.table.setRowCount(0)\n\n # Fill the table\n displayed_strings = []\n\n idx = 0\n for (addr, s) in s_ref_list:\n if show_unique_s and s in displayed_strings:\n continue\n\n displayed_strings.append(s)\n\n self.table.insertRow(idx)\n addr_item = QTableWidgetItem(\"%08x\" % addr)\n addr_item.setFlags(addr_item.flags() ^ QtCore.Qt.ItemIsEditable)\n string_item = QTableWidgetItem(\"%s\" % s)\n string_item.setFlags(string_item.flags() ^ QtCore.Qt.ItemIsEditable)\n\n self.table.setItem(idx, 0, addr_item)\n self.table.setItem(idx, 1, string_item)\n\n if show_misc.entropy:\n misc.entropy_item = cw.NumQTableWidgetItem(\"%.4f\" % misc.entropy(s))\n self.table.setItem(idx, 2, misc.entropy_item)\n\n idx += 1", "def build_summary(val1=None, val2=None, val3=None):\n summary = []\n if val1 is not None:\n summary.append(val1)\n if val2 is not None:\n summary.append(val2)\n if val3 is not None:\n summary.append(val3)\n if not summary:\n return None\n return ' : '.join(summary)", "def present_summary(services, methods, count, backup):\n print_heading(\"Summary\")\n if backup is not None:\n writer(f\"Backup: {backup}\")\n writer(f\"Showing {count[0]}/{len(services)} Services\")\n writer(f\"Showing {count[1]}/{len(methods)} Methods\\n\")" ]
[ "0.58870405", "0.58870405", "0.58870405", "0.5803425", "0.5781204", "0.5522118", "0.5503257", "0.54215056", "0.53986794", "0.53744864", "0.53230405", "0.5302629", "0.52970827", "0.5282875", "0.5266578", "0.52372295", "0.5206945", "0.5153626", "0.5151684", "0.5148715", "0.51414406", "0.513884", "0.5113788", "0.5096503", "0.5089631", "0.5086856", "0.5086806", "0.508191", "0.5079586", "0.5074926", "0.50606024", "0.50519454", "0.498465", "0.49799615", "0.4977733", "0.49756002", "0.49656308", "0.49626714", "0.49579912", "0.4955442", "0.49550566", "0.49515706", "0.49225795", "0.49201167", "0.49151644", "0.49133366", "0.48872617", "0.4885063", "0.48634687", "0.48562407", "0.48537642", "0.48364845", "0.4828343", "0.48278353", "0.48253134", "0.4824352", "0.48218393", "0.48218393", "0.48218393", "0.48218393", "0.48218393", "0.48218393", "0.48218393", "0.48217204", "0.48217204", "0.4813955", "0.4805884", "0.48044726", "0.4803097", "0.47962028", "0.47911835", "0.4784326", "0.47843072", "0.47834933", "0.4781381", "0.47813702", "0.47739768", "0.47715017", "0.47493574", "0.47492936", "0.47470236", "0.4740841", "0.47284788", "0.47268978", "0.4726465", "0.4726461", "0.47239625", "0.4720216", "0.4713134", "0.4712306", "0.4711902", "0.47106326", "0.47058347", "0.47053674", "0.46980813", "0.46979403", "0.46973798", "0.4696966", "0.46958622", "0.46934485" ]
0.6722548
0
Display unpublished Draft Entries
def drafts():
    query = Entry.drafts().order_by(Entry.last_mod_date.desc())
    return object_list('index.html', query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drafts_view(self, request, object_id, extra_context=None):\n opts = self.model._meta\n action_list = [{\"revision\": version.revision,\n \"url\": reverse(\"admin:%s_%s_draft\" % (opts.app_label, opts.module_name), args=(version.object_id, version.revision.id))}\n for version in self.get_draft_versions(object_id).select_related(\"revision\")]\n context = {\n \"action_list\": action_list, \n \"title\": _(\"Unpublished items\"), \n 'draft_view':True, \n 'has_draft':self.has_draft(object_id)\n }\n context.update(extra_context or {})\n return super(EasyPublisher, self).history_view(request, object_id, context)", "def get_drafts(self):\n return self.filter(status=\"D\")", "def post_list(request):\n # Only show the posts that have been published\n posts = Post.objects.filter(date_published__isnull=False)\n return render(request,\n 'blog/post_list.html',\n {'posts': posts}\n )", "def get_drafts(self, **kwargs):\n default_kwargs = { \"order\": \"updated_at desc\" }\n default_kwargs.update(kwargs)\n return self.get_messages(statuses=[\"draft\"], **default_kwargs)", "def draft(page):\r\n return app_index(page, cached_apps.get_draft, 'draft',\r\n False, True)", "def render_archives():\n\n\tq = \"SELECT title, text, id, project FROM entries WHERE archived=1 ORDER BY id desc\"\n\tcur = g.db.execute(q)\n\trows = cur.fetchall()\n\tentries = [dict(\n\t\t\ttitle=row[0], \n\t\t\ttext=row[1], \n\t\t\tid=row[2], \n\t\t\tproject=row[3]) for row in rows]\n\n\t\"\"\" filter catagories as to not repeat \"\"\"\n\tfiltered_catagories = set([ x[3] for x in rows ])\n\n\treturn render_template('show_entries.html', \n\t\tentries=entries, \n\t\tcatagories=filtered_catagories,\n\t\tfiltered=False,\n\t\tarchived=True,\n\t\t)", "def list_drafts(self, account):\n account = Account(account, hive_instance=self.hive)\n return self._conveyor_method(account, None,\n \"conveyor.list_drafts\",\n [account['name']])", "def get_draft_revisions(self, object_id):\n content_type = ContentType.objects.get_for_model(self.model)\n return Revision.objects.filter(\n version__object_id=object_id, \n version__content_type=content_type,\n easypublishermetadata__status='draft',\n easypublishermetadata__language=get_language()\n ).select_related().distinct()", "def draft_message(request):\n query = models.Message.query(\n models.Message.sender == request.user.email(),\n models.Message.draft == True,\n ancestor=request.issue.key)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return HttpTextResponse(draft_message.text if draft_message else '')\n return HttpTextResponse('An error occurred.', status=500)", "def test_home_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Praesent sollicitudin.', status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertContains(response, \"No posts are available.\")\n self.assertQuerysetEqual(response.context['posts'], [])", "def list_drafts(self) -> PagingList[Draft]:\n return PagingList(self._generate_drafts, 128)", "def history_view(self, request, object_id, extra_context=None):\n defaults = {\n 'has_draft': self.has_draft(object_id)\n }\n defaults.update(extra_context or {})\n return super(EasyPublisher, self).history_view(request, object_id, defaults)", "def isDraft(self): #$NON-NLS-1$\r", "def get_haikus_unposted(cls, db_session) -> list:\n q = (\n db_session.query(cls)\n .filter(cls.date_posted == None) # noqa: E711\n .filter(cls.date_deleted == None) # noqa: E711\n )\n return q.all()", "def drafts(self):\n if self._drafts is None:\n if self._initialize_drafts():\n self._save_drafts()\n return self._drafts", "def public_timeline():\n return render_template('timeline.html', messages=query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE]))", "def public_timeline():\n return render_template('timeline.html', messages=query_db('''\n select message.*, user.* from message, user\n where message.author_id = user.user_id\n order by message.pub_date desc limit ?''', [PER_PAGE]))", "def test_show_post_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n draft_post = create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n url = reverse('blog.post', args=(draft_post.id,))\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)", "def education_post_list(request):\n posts = EducationBlogPost.objects.filter(published_date__lte=timezone.now()\n ).order_by('-published_date')\n return render(request, \"education_center/education_blogposts.html\", {'posts': posts})", "def draft_message(request):\n query = models.Message.query(\n models.Message.issue_key == request.issue.key,\n models.Message.sender == request.user.email(),\n models.Message.draft == True)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return _get_draft_message(draft_message)\n elif request.method == 'POST':\n return _post_draft_message(request, draft_message)\n elif request.method == 'DELETE':\n return _delete_draft_message(draft_message)\n return HttpTextResponse('An error occurred.', status=500)", "def get_published(self):\n return self.filter(status=\"P\")", "def list_unresolved(self): # new\n feed = self.get_feed(limit=999999)\n posts = feed.get(\"threads\")\n\n for s in posts:\n if (\n s.get(\"approved_status\", \"approved\") != \"rejected\"\n and (\n s.get(\"type\", \"question\") != \"post\" or s.get(\"is_megathread\", True)\n )\n and not s.get(\"is_answered\", True)\n and s.get(\"unresolved_count\", 1)\n ):\n yield s", "def show_entries():\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, \\\n url from entries order by id asc')\n entries = cur.fetchall()\n return render_template('show_entries.html', entries=entries)", "def _get_draft_message(draft):\n return HttpTextResponse(draft.text if draft else '')", "def 
test_get_drafts(self):\n r1 = Recipes.objects.create(chef=self.user, name=\"Recipe 1\", draft=True)\n r2 = Recipes.objects.create(chef=self.user, name=\"Recipe 2\", draft=False)\n\n url = '/0/chefs/%i/drafts' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('drafts', resp.data)\n self.assertEqual(1, len(resp.data['drafts']))\n keys = (\"liked\", \"public_url\", \"edit_date\", \"ingredients\", \"shared\", \"tags\", \"commented\",\n \"private\", \"id\", \"chef\", \"reported\", \"nb_shares\", \"added\", \"nb_added\",\n \"nb_comments\", \"draft\", \"commensals\", \"creation_date\", \"nb_likes\", \"name\",\n \"products\", \"prep_time\", \"serves\", \"bought\", \"book_for_sale\", \"description\")\n self.assertEqual(set(keys), set(resp.data['drafts'][0].keys()))\n self.assertEqual(r1.pk, resp.data['drafts'][0]['id'])", "def test_home_view_with_draft_post_and_published_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Published Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Published')\n create_post(category=category, author=author, name='Draft Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertQuerysetEqual(\n response.context['posts'],\n ['<Post: Published Post>']\n )", "def show_entries_stream():\n pass", "def draft_messages(self):\n return self._get_messages_from_folder_name('Drafts')", "def entries_index(request):\n blog_entries = Entry.objects.filter(status=2).order_by('-pub_date')\n paginator = Paginator(blog_entries, 4)#4 posts/page\n try:\n page = int(request.GET.get('page','1'))\n except ValueError:\n page = 1\n try:\n entries = paginator.page(page)\n except (EmptyPage, InvalidPage):\n entries = paginator.page(paginator.num_pages)\n return render_to_response('blog/blog.html', {'entries':entries}, RequestContext(request))", "def full_listing(request, urlname):\n\tif request.user.is_authenticated():\n\t\tblog = Blog.qa_objects.get(urlname=urlname)\n\t\tposts = BlogEntry.qa_objects.filter(blog=blog, posting_time__lte=datetime.now()).order_by('-posting_time')\n\telse:\n\t\tblog = Blog.objects.get(urlname=urlname)\n\t\tposts = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')\n\treturn render_to_response('blogs/full.html', {'blog': blog, 'posts': posts}, context_instance=RequestContext(request))", "def _get_live_entries(self):\n from article.models import Entry\n return self.entry_set.filter(status__exact=Entry.LIVE_STATUS)", "def delete_drafts(request):\n query = models.Comment.query(\n models.Comment.author == request.user, models.Comment.draft == True,\n ancestor=request.issue.key)\n keys = query.fetch(keys_only=True)\n ndb.delete_multi(keys)\n request.issue.calculate_draft_count_by_user()\n request.issue.put()\n return HttpResponseRedirect(\n reverse(publish, args=[request.issue.key.id()]))", "def atom_feed():\n from simblin.lib.rfc3339 import rfc3339\n posts = Post.query.filter_by(visible=True).order_by(Post.datetime.desc())\n updated = posts.first().datetime\n response = make_response(render_template('atom.xml', posts=posts, \n updated=updated, rfc3339=rfc3339))\n 
response.mimetype = \"application/atom+xml\"\n return response", "def view_entries(search_query=None):\n\n # search_query se ha añadido como funcionalidad extra (search_entries)\n # Por defecto es None, por si no se quiere usar la funcionalidad extra\n # Guarda en una lista todas las entrdas del diario recogidas con el select\n entries = Entry.select().order_by(Entry.timestamp.desc())\n\n if search_query:\n # Con where filtramos todas las entries que cumplen la condicion\n entries = entries.where(Entry.content.contains(search_query))\n\n # Ahora toca mostrarlas con un formato adecuado\n for entry in entries:\n # Primero la fecha\n timestamp = entry.timestamp.strftime('%A %b %d, %Y %I:%M%p')\n clear()\n print(timestamp)\n print('+'*len(timestamp))\n print(entry.content)\n print('\\n\\n'+'+'*len(timestamp) + '\\n') # Pone tantos + como caracteres haya\n print('Enter| siguiente entrada')\n print('d| Borrar entrada')\n print('q| Salir al menu')\n\n # Obtener la siguiente entrada del usuario\n next_action = input('Accion a realizar: [Nq]').lower().strip()\n\n if next_action == 'q':\n break\n elif next_action == 'd':\n delete_entry(entry)", "def get_queryset(self):\n return self.queryset.filter(contest__publish_date__lte=timezone.now())", "def articles(self):\n return self.get_queryset().filter(content_type__model='article').order_by('-articles__published_at')", "def get_queryset(self):\n return self.queryset.filter(theme__contest__publish_date__lte=timezone.now())", "def test_get_list_published_user_drafts(self):\n story1 = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n language=\"en\", author=self.user)\n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='draft',\n language=\"en\", author=self.user)\n self.api_client.client.login(username=self.username, password=self.password)\n uri = '/api/0.1/stories/'\n resp = self.api_client.get(uri)\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 2)\n story_ids = [story['story_id'] for story in self.deserialize(resp)['objects']]\n self.assertIn(story1.story_id, story_ids)\n self.assertIn(story2.story_id, story_ids)", "def show_rsd_thing(request, date, plain=False,\n regular=False, events=False, lost_and_found=False, jobs=False):\n \n if regular:\n regular = Announcement.regular.running_on(date).order_by('-date_start', 'pk')\n if events:\n events = Announcement.events.running_on(date).order_by('event_date', 'event_time', 'pk')\n if lost_and_found:\n lost_and_found = Announcement.lost_and_found.running_on(date).order_by('-date_start', 'pk')\n if jobs:\n midnight = datetime.datetime.combine(date, datetime.time(23, 59))\n jobs = JobListing.published.order_by('is_filled', '-pub_date') \\\n .filter(pub_date__lte=midnight) \\\n .filter(pub_date__gte=midnight - datetime.timedelta(days=7))\n if date == datetime.date.today():\n jobs = jobs.filter(is_filled=False)\n \n if not any(x.count() if x else 0 for x in (regular, events, lost_and_found, jobs)):\n raise Http404\n \n tomorrow = date + datetime.timedelta(days=1)\n comments = PublicComment.visible.filter(time__lt=tomorrow).order_by('-time')\n \n order = lambda x, *ord: x.order_by(*ord) if x else []\n data = {\n 'year': date.year, 'month': date.month, 'day': date.day, 'date': date,\n 'announcements': regular or [],\n 'events': events or [],\n 'jobs': jobs or [],\n 'lost_and_found': lost_and_found or [],\n 'comments': comments[:3],\n 'stories': 
Article.published.order_by('-pub_date').filter(is_racy=False)[:3],\n 'for_email': boolean_arg(request.GET.get('for_email', ''), False),\n }\n template = \"issue/rsd.\" + ('txt' if plain else 'html')\n return render_to_response(template, data)", "def action_draft(self):\n self.state = 'draft'", "def action_draft(self):\n self.state = 'draft'", "def getAllUnpublishedObjects(self, resource='objects/unpublished'):\n\n objects = list()\n\n for item in self.iterateAllPaginated(resource, vsdModels.APIObject):\n obj = self.getObject(item.selfUrl)\n objects.append(obj)\n return objects", "def update_draft(self, version, request):\n for metadata in version.revision.easypublishermetadata_set.all():\n if request.user.has_perm(\"easypublisher.can_approve_for_publication\"): \n metadata.status = 'published'\n # save all other drafts for this object as declined, because we\n # chose to save a different one\n for other in EasyPublisherMetaData.objects.filter(\n revision__version__object_id=version.object_id, \n revision__version__content_type=version.content_type):\n other.status = 'declined'\n other.save()\n else:\n metadata.status = 'updated'\n metadata.save()", "def test_should_render_with_unpublished(self) -> None:\n self.assertFalse(self.action.should_render(\n context=self._create_request_context(public=False)))", "def publish_draft(self, kav_id):\n kav_api = getattr(self.api, settings.SALESFORCE_ARTICLE_TYPE)\n kav = kav_api.get(kav_id)\n body = kav[settings.SALESFORCE_ARTICLE_BODY_FIELD]\n body = HTML.update_links_production(body)\n\n data = {settings.SALESFORCE_ARTICLE_BODY_FIELD: body}\n\n if settings.SALESFORCE_ARTICLE_TEXT_INDEX_FIELD is not False:\n data[settings.SALESFORCE_ARTICLE_TEXT_INDEX_FIELD] = body\n\n kav_api.update(kav_id, data)\n self.set_publish_status(kav_id, 'online')", "def getLatestUnpublishedObject(self):\n\n res = self.getRequest('objects/unpublished')\n\n if len(res['items']) > 0:\n obj = self.getObject(res['items'][0].get('selfUrl'))\n return obj\n else:\n print('you have no unpublished objects')\n return None", "def get_draft_versions(self, object_id):\n content_type = ContentType.objects.get_for_model(self.model)\n versions = Version.objects.filter(\n revision__easypublishermetadata__status='draft',\n revision__easypublishermetadata__language=get_language(),\n object_id=object_id,\n content_type=content_type\n ).distinct()\n \n return versions", "def show_entries():\n db = get_db()\n cur = db.execute('select distinct name,repo_id,stars, description from python_repos order by stars desc')\n entries = cur.fetchall()\n # get api\n results = get_api()\n # The update operation will consist of deletion and insertion for efficiency\n delete_entry(results)\n add_entry(results)\n return render_template('index.html', entries=entries)", "def view_post(request, slug_post):\n try:\n post = Entry.objects.filter(status=2).get(slug=slug_post)\n except Entry.DoesNotExist:\n raise Http404\n return render_to_response('blog/post.html', {'post':post, 'DISQUS_SHORTNAME':settings.DISQUS_SHORTNAME}, RequestContext(request))", "def test_draft_list_does_not_show_deleted_aids(client, contributor):\n\n AidFactory(name='Is this the real life?', author=contributor,\n status='deleted')\n client.force_login(contributor)\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n\n content = res.content.decode('utf-8')\n assert 'Is this the real life?' 
not in content", "def get_contents(\n self, post_ids: List[str], datetime_filter_fn: Optional[Callable[[datetime], bool]] = None\n ) -> List[str]:\n contents = []\n url = f\"http://blog.naver.com/PostView.nhn\"\n params = {\"blogId\": self.naver_id}\n for post_id in post_ids:\n params[\"logNo\"] = post_id\n\n # Get contents of a post\n response = self.session.get(url, params=params)\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n # Smart editor 3\n text = soup.select_one(f\"#post-view{post_id} > div > div > div.se-main-container\")\n # Smart editor 2\n if not text:\n text = soup.select_one(\n f\"#post-view{post_id} > div > div > div.se_component_wrap.sect_dsc.__se_component_area\"\n )\n\n if not text:\n text = soup.select_one(f\"#post-view{post_id}\")\n if text:\n text = text.get_text(\"\\n\").replace(\"\\xa0\", \" \") # Space unicode replace\n else:\n print(f\"[Error] cannot select content in {post_id}.\", file=sys.stderr)\n continue\n\n text = re.sub(\"\\s+\", \" \", text).strip()\n if datetime_filter_fn is None:\n contents.append(text)\n continue\n\n date_time = soup.select(\n f\"#post-view{post_id} > div > div > div > div > div > div.blog2_container > span.se_publishDate.pcol2\"\n )\n date_time += soup.select(\"#printPost1 > tr > td.bcc > table > tr > td > p.date.fil5\")\n\n if date_time:\n date_time = date_time[0].get_text()\n post_datetime = datetime.strptime(date_time, \"%Y. %m. %d. %H:%M\")\n if not datetime_filter_fn(post_datetime):\n continue\n else:\n print(f\"[Error] cannot select datetime in {post_id}, this post is not filtered\")\n\n contents.append(text)\n\n print(f\"Get contents: {len(contents)} found.\")\n return contents", "def test_no_unpublish_button_appears(self):\n response = self.client.get(reverse('wagtailnews:index', kwargs={\n 'pk': self.index.pk}))\n self.assertNotContains(response, self.url)", "def articles():\n entries = []\n cur = g.db.execute(\n \"\"\"\n SELECT entries.location FROM categories\n INNER JOIN entries ON\n entries.slug = categories.slug AND\n entries.published = categories.published\n WHERE categories.category='{category}'\n ORDER BY entries.published DESC\n \"\"\".format(category='article'))\n\n for (row,) in cur.fetchall():\n if os.path.exists(row+\".md\"):\n entries.append(file_parser(row+\".md\"))\n return render_template('blog_entries.html', entries=entries)", "def OnGetItem(self, n):\n try:\n return self.blog.get_blog(n).html()\n except IndexError, err:\n display_error(_('Could not get blog.'), error=err)\n return \"<p>Corrupted Blog</p>\"", "def show_archives():\n if not session.get('logged_in'): \n latest = Post.query.filter_by(visible=True)\n else:\n latest = Post.query\n latest = latest.order_by(Post.id.desc()).limit(10)\n months = Post.query.get_months()\n tags = Tag.query.order_by(Tag.name).all()\n #: Needed for calculation of tag cloud\n max_count = Tag.query.get_maxcount()\n categories = sorted(Category.query.all(), key=lambda x: -x.post_count)\n uncategorized_count = Post.query.filter(Post.categories==None).count()\n return render_template('archives.html', latest=latest, tags=tags,\n categories=categories, uncategorized_count=uncategorized_count, \n months=months, max_count=max_count)", "def get_queryset(self):\n return Article.objects.filter(pub_date__lte=timezone.now())", "def _get_published(self):\n return self.__published", "def recently(self):\n items = []\n for item in self.p.entries:\n dt = datetime.fromtimestamp(mktime(item.published_parsed))\n delta = datetime.today() - dt\n\n if delta.days > self.days:\n 
continue\n items.append(item)\n if 'verbose' in self.args and self.args['verbose']:\n print delta.days, dt\n self.items = items\n return items", "def entry(request, entry_id):\n\n __time_update(request.user)\n\n try:\n entry = Entry.objects.get(id=entry_id)\n feed = entry.feed\n\n if feed.user == request.user:\n entry = entry.entry.read()\n else:\n return render_to_response('message.html', {'message':\n 'There is no such entry.',\n 'back': '/feeds'})\n except:\n return render_to_response('message.html', {'message':\n 'Error opening entry file! Please, reload feed.',\n 'back': '/feeds'})\n\n return HttpResponse(entry)", "def action_draft(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'draft'\n action = 'draft'\n default = {\n 'state': status,\n 'engineering_writable': True,\n }\n doc_default = {\n 'state': status,\n 'writable': True,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Draft'),\n 'action': action,\n 'docaction': 'draft',\n 'excludeStatuses': ['draft', 'released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['confirmed', 'uploaded', 'transmitted'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)", "def show(request):\n patchsets = request.issue.get_patchset_info(request.user, None)\n last_patchset = first_patch = None\n if patchsets:\n last_patchset = patchsets[-1]\n if last_patchset.patches:\n first_patch = last_patchset.patches[0]\n messages = []\n has_draft_message = False\n for msg in request.issue.messages:\n if not msg.draft:\n messages.append(msg)\n elif msg.draft and request.user and msg.sender == request.user.email():\n has_draft_message = True\n num_patchsets = len(patchsets)\n return respond(request, 'issue.html', {\n 'first_patch': first_patch,\n 'has_draft_message': has_draft_message,\n 'is_editor': request.issue.edit_allowed,\n 'issue': request.issue,\n 'last_patchset': last_patchset,\n 'messages': messages,\n 'num_patchsets': num_patchsets,\n 'patchsets': patchsets,\n })", "def unpublish(id):\n db = core.connect()\n theShift = db[id]\n theShift[\"publishData\"][\"draft\"] = True\n db[id] = theShift", "def view_entry(id):\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, url \\\n from entries where id = ? 
order by id desc',\n [id.strip()])\n entries = cur.fetchall()\n return render_template('view_entry.html', entries=entries)", "def unpublish_action(modeladmin, request, queryset):\n\n count = queryset.filter(published=True).update(published=False)\n messages.info(request, f\"Unpublished {count} objects\")", "def newsfeed(request):\n article_list = Article.objects.order_by('published_date')\n context = {'article_list': article_list}\n return render(request, 'sacms/newsfeed.html', context)", "def newsfeed_en(request):\n article_list = Article.objects.order_by('published_date')\n context = {'article_list': article_list}\n return render(request, 'sacms/newsfeed_en.html', context)", "def not_expired_posts_per_topic_list(request, pk):\n update_posts_expiration()\n #get posts that are not expired in a certain topic\n post = Post.objects.filter(is_expired=False, topic=pk)\n serializer = ViewPostSerializer(post, many=True)\n return Response(serializer.data)", "def popView(self,event=None):\r\n self.journalView.clearTree()\r\n self.displaySet = set()\r\n sqlStr = str.format(\"SELECT * FROM {} WHERE Date>= ? ORDER BY Date\",self.dataTable)\r\n res = self.dbConn.execute(sqlStr,[self.viewStartDate.getDateStamp()]).fetchall()\r\n for ln in res:\r\n self.displaySet.add(ln[\"Date\"])\r\n self.journalView.addLine(\"\",dateFromStamp(ln[\"Date\"]),ln[2:])\r\n\r\n allDateSet = {stampFromDate(ln) for ln in daysSince(self.viewStartDate.getDateText())}\r\n missingDates = sorted(list(allDateSet-self.displaySet))\r\n tmpStr = str.format(\"{} of {}\",len(missingDates),len(allDateSet))\r\n for ln in missingDates:\r\n self.missingDates.addLine(dateFromStamp(ln))", "def get_queryset(self):\n return Post.objects.filter(published_date__isnull=True).order_by('created_date')", "def form_valid(self, form):\n\n draft_pk = self.request.POST.get(\"pub_draft_pk\", \"\")\n publishing_draft = draft_pk.isdigit()\n\n if (not publishing_draft) and (self.topic.exists and self.topic.is_banned):\n # Cannot check is_banned before checking its existence.\n notifications.error(self.request, _(\"we couldn't handle your request. try again later.\"))\n return self.form_invalid(form)\n\n status = self.request.user.entry_publishable_status\n\n if status is not None:\n notifications.error(self.request, status, extra_tags=\"persistent\")\n if publishing_draft:\n return redirect(reverse(\"entry_update\", kwargs={\"pk\": int(draft_pk)}))\n return self.form_invalid(form)\n\n if publishing_draft:\n try:\n entry = Entry.objects_all.get(\n pk=int(draft_pk), is_draft=True, author=self.request.user, topic__is_banned=False\n )\n entry.content = form.cleaned_data[\"content\"]\n entry.is_draft = False\n entry.date_created = timezone.now()\n entry.date_edited = None\n except Entry.DoesNotExist:\n notifications.error(self.request, _(\"we couldn't handle your request. 
try again later.\"))\n return self.form_invalid(form)\n else:\n # Creating a brand new entry.\n entry = form.save(commit=False)\n entry.author = self.request.user\n\n if self.topic.exists:\n entry.topic = self.topic\n else:\n if not self.topic.valid:\n notifications.error(self.request, _(\"curses to such a topic anyway.\"), extra_tags=\"persistent\")\n return self.form_invalid(form)\n\n entry.topic = Topic.objects.create_topic(title=self.topic.title)\n\n entry.save()\n notifications.info(self.request, _(\"the entry was successfully launched into stratosphere\"))\n return redirect(reverse(\"entry-permalink\", kwargs={\"entry_id\": entry.id}))", "def show():\n context = {\n \"posts\": get_posts()[::-1]\n }\n return render_template(\"show.html\", **context)", "def get_queryset(self):\n return Post.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Post.objects.filter(pub_date__lte=timezone.now())", "def rss2(request):\n return {'pastes': previous()}", "def nfldraft(self, irc, msg, args, optyear, optround):\n \n if optyear: # if optyear is there, test for valid and if after 2003.\n testdate = self._validate(optyear, '%Y')\n if not testdate:\n irc.reply(\"Invalid year. Must be YYYY.\")\n return\n if optyear < 1996:\n irc.reply(\"Year must be after 1996.\")\n return\n \n if optround:\n if 1 <= optround <= 7:\n irc.reply(\"Draft round must be 1 or 7.\")\n return\n \n url = self._b64decode('aHR0cDovL2luc2lkZXIuZXNwbi5nby5jb20vbmZsL2RyYWZ0L3JvdW5kcw==')\n\n if optyear: # add year if we have it.\n url += '?year=%s' % (optyear)\n\n if optround: # optional round.\n url += '&round=%s' % (optround)\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to fetch: %s\" % url)\n return\n\n soup = BeautifulSoup(html)\n\n # check and make sure we have a table, otherwise error.\n if not soup.find('table', attrs={'class':'tablehead draft-tracker'}): \n irc.reply(\"error: could not find any draft information. Bad year or round?\")\n return\n else:\n table = soup.find('table', attrs={'class':'tablehead draft-tracker'})\n \n h2 = soup.find('h2')\n rows = table.findAll('tr', attrs={'class': re.compile('^oddrow.*?|^evenrow.*?')})\n\n object_list = []\n \n for row in rows:\n pickNumber = row.find('p', attrs={'class':'round-number'})\n pickName = row.find('p', attrs={'class':'player-name'})\n pickPos = row.find('li', attrs={'class':'li-position'})\n pickTeam = row.find('p', attrs={'class':'team-name'})\n \n appendString = ircutils.bold(pickNumber.getText()) + \". 
\" + pickName.getText() + \" - \" + pickTeam.getText()\n \n if row.find('p', attrs={'class':'notes'}):\n appendString += \" (\" + row.find('p', attrs={'class':'notes'}).getText() + \")\"\n \n object_list.append(appendString) \n \n irc.reply(ircutils.mircColor(h2.getText().strip(), 'red') + \": \") # print header.\n \n for N in self._batch(object_list, 6):\n irc.reply(' | '.join(str(n) for n in N))", "def has_unpublished_feedbackdraft(self):\n last_feedbackset = self.cached_data.last_feedbackset\n return (last_feedbackset.grading_published_datetime is None\n and last_feedbackset.grading_points is not None)", "def view_blog(self):", "def post_list(request, topic_id, pagination_id):\n\ttry:\n\t\ttopic = Topic.objects.get(id=topic_id)\n\texcept Topic.DoesNotExist:\n\t\treturn HttpResponseRedirect('/forum/')\n\tif topic.is_locked:\n\t\topened = False\n\telse:\n\t\topened = True\n\treturn object_list(\n\t\trequest,\n\t\ttopic.post_set.all().order_by('post_date'),\n\t\tpaginate_by = 10,\n\t\tpage = pagination_id,\n\t\textra_context = {\n\t\t\t'topic_id':topic_id,\n\t\t\t'opened': opened,\n\t\t\t'topic': topic.topic_name,\n\t\t\t'forum_id': topic.topic_forum.id,\n\t\t\t'forum_name': topic.topic_forum,\n\t\t\t'perms': list_perms(request),\n\t\t\t'current_user': str(request.user)},\n\t\ttemplate_name = 'myghtyboard/post_list.html')", "def test_explore_get_list_only_published(self):\n story1 = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n \n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='draft')\n resp = self.api_client.get('/api/0.1/stories/explore/')\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 1)\n self.assertEqual(self.deserialize(resp)['objects'][0]['story_id'], story1.story_id)", "def listing(request, urlname):\n\tyearago = datetime.now() - timedelta(days=365)\n\tif request.user.is_authenticated():\n\t\tblog = Blog.qa_objects.get(urlname=urlname)\n\t\tsections = BlogSection.qa_objects.filter(blog=blog).order_by('position')\n\telse:\n\t\tblog = Blog.objects.get(urlname=urlname)\n\t\tsections = BlogSection.objects.filter(blog=blog).order_by('position')\n\tdisplay_sections = []\n\tfor section in sections:\n\t\tif not section.blogentry_section.filter(Q(entry__posting_time__gte=yearago) | Q(entry__exclude_from_archiving=u'1') | Q(entry__posting_time__gt=datetime.now())):\n\t\t\tcontinue\n\t\tdisplay_section = SectionDisplay(section, request.user.is_authenticated())\n\t\tdisplay_sections.append(display_section)\n\treturn render_to_response('blogs/blog.html', {'blog': blog, 'sections': display_sections}, context_instance=RequestContext(request))", "def list_drafts_ids(request):\n if \"drafts\" not in request.session:\n request.session[\"drafts\"] = []\n\n drafts = request.session[\"drafts\"]\n return [draft[\"id\"] for draft in drafts]", "def notices(request):\n notice_list = Notice.objects.order_by('published_date')\n context = {'notice_list': notice_list}\n return render(request, 'sacms/notices.html', context)", "def test_was_published_recently_with_old_entry(self):\n old_entry = create_entry_with_tag('Title', -30)\n self.assertIs(old_entry.was_published_recently(), False)", "def test_live_request_to_draft_index_fails(self):\n self.setup_inline_index(live=False)\n\n response = self.client.get(self.inline_index.url)\n\n self.assertEqual(response.status_code, 404)", "def list_all(request):\n\n entries = 
BlogEntry.objects.all()\n data = {'entries': paginate_objects(request, entries),\n 'blog_info': get_blog_info(), 'action_str': 'All Blogs Shown'}\n\n return render_to_response('blog/list_entries.html', data,\n context_instance=get_rq(request))", "def toggle_publish(request,id):\n \n instance = get_object_or_404(Post, id=id)\n\n if request.method==\"POST\":\n instance.published = not instance.published\n instance.save()\n\n t, created = LastUpdate.objects.get_or_create(id=1)\n\n t.updated = instance.updated\n t.save()\n\n cache.clear()\n return redirect('posts:home')\n\n context = {'post':instance}\n\n return render(request, 'posts/publish.html', context)", "def action_draft(self):\n context = self._context or {}\n inv_obj = self.env['account.invoice']\n\n brw = self.browse( self.ids[0])\n inv_ids = [i.invoice_id.id for i in brw.line_ids]\n if inv_ids:\n inv_obj.write( {'wh_src_id': False})\n\n return self.write( {'state': 'draft'})", "def get_all_posts(request, show_only=None):\n if(show_only == None):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-is_important', '-published_date')\n title = \"All Posts\"\n else:\n posts = Post.objects.filter(post_type__exact=show_only.name).filter(published_date__lte=timezone.now()).order_by('-is_important', '-published_date')\n title = show_only.name\n return render(request, \"posts.html\", {\"posts\": posts, \"title\": title})", "def stage_draft_rulings(self):\r\n rulings = pd.read_excel(self.draft_ruling_path)\r\n for k in rulings.keys():\r\n rulings[k].fillna(value=\"\", inplace=True)\r\n rulings = rulings.to_dict(\"records\")\r\n id_to_ruling = dict(\r\n map(lambda r: (self.id(r), r), rulings)\r\n )\r\n u.cache_results(id_to_ruling, self.staged_ruling_path)", "def _initialize_drafts(self):\n drafts = memcache.get('user_drafts:' + self.email)\n if drafts is not None:\n self._drafts = drafts\n ##logging.info('HIT: %s -> %s', self.email, self._drafts)\n return False\n # We're looking for the Issue key id. 
The ancestry of comments goes:\n # Issue -> PatchSet -> Patch -> Comment.\n issue_ids = set(comment.key().parent().parent().parent().id()\n for comment in gql(Comment,\n 'WHERE author = :1 AND draft = TRUE',\n self.user))\n self._drafts = list(issue_ids)\n ##logging.info('INITIALIZED: %s -> %s', self.email, self._drafts)\n return True", "def apply_view(collection, view_name):\n for row in collection.view(view_name):\n if row.value.get(\"revisionState\") == \"published\":\n yield row.value", "def test_get_list_published_only(self):\n story1 = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n language=\"en\", author=self.user)\n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='draft',\n language=\"en\", author=self.user)\n uri = '/api/0.1/stories/'\n resp = self.api_client.get(uri)\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 1)\n story_ids = [story['story_id'] for story in self.deserialize(resp)['objects']]\n self.assertNotIn(story2.story_id, story_ids)", "def show_all_posts():\n post = Post.query.all()\n\n return render_template('all-posts.html', post=post)", "def allUnresolved(request, page=1):\n objects = im.Issue.objects.filter(resolved_state__isnull=True).reverse()\n \n \n args = utils.generatePageList(request, objects, page)\n args['issues'] = args['objects']\n \n args['no_results'] = args['page'].object_list.count() < 1\n\n return render_to_response(\"issue_list.html\", args,\n context_instance=RequestContext(request))", "def get_query_set(self):\n return super(PublishedManager, self).get_query_set().filter(is_published=True)", "def publish_view(self, request, object_id, revision_id, extra_context=None):\n \n obj = get_object_or_404(self.model, pk=object_id)\n version = get_object_or_404(Version,\n revision=revision_id,\n object_id=force_unicode(obj.pk),\n content_type=ContentType.objects.get_for_model(obj))\n \n if not version.revision.easypublishermetadata_set.filter(language=request.LANGUAGE_CODE):\n request.user.message_set.create(message=_(\"There is no draft available for language %s\") % request.LANGUAGE_CODE)\n return HttpResponseRedirect('../../current')\n \n # Generate the context.\n context = {\n \"title\": _(\"Publish %(name)s\") % {\"name\": self.model._meta.verbose_name},\n \"publish\":True,\n 'has_draft':True,\n 'link_current':True,\n 'extra':0,\n 'revision_id': revision_id,\n }\n context.update(extra_context or {})\n return self.render_revision_form(request, obj, version, context, revert=True)", "def rss(request, blog):\n\tblog = Blog.objects.get(urlname=blog)\n\tarticles = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')[:RSS_COUNT]\n\treturn render_to_response('rss/blog.html', {'blog': blog, 'articles': articles}, context_instance=RequestContext(request))", "def show_all_entries(user_id):\n\n # prevents the public for accessing user specific information\n if not session.get(\"user_id\") or session[\"user_id\"] != user_id:\n return redirect(\"/\")\n\n # grab all the users entries\n user = User.query.get(user_id)\n entries = (\n Entry.query.filter_by(user_id=user_id).order_by(desc(\"date_created\")).all()\n )\n\n page, per_page, offset = get_page_args(\n page_parameter=\"page\", per_page_parameter=\"per_page\"\n )\n\n per_page = 5\n\n offset = (page - 1) * per_page\n total = len(entries)\n\n pagination_entries = entries[offset : offset + per_page]\n pagination = Pagination(\n page=page, 
per_page=per_page, total=total, css_framework=\"bootstrap4\"\n )\n\n return render_template(\n \"all-entries.html\",\n entries=pagination_entries,\n user=user,\n page=page,\n per_page=per_page,\n pagination=pagination,\n )", "def db_select_unpublished(self):\n \n query = \"SELECT * FROM %s WHERE doi IS NULL\" % PUBLICATIONS_TABLE\n with self.connection:\n c = self.connection.cursor()\n c.execute(query)\n result = c.fetchall()\n \n paths = []\n for r in result:\n paths.append(str(r[\"path\"]))\n return paths", "def test_no_edit_button_appears(self):\n response = self.client.get(reverse('wagtailnews:index', kwargs={\n 'pk': self.index.pk}))\n self.assertNotContains(response, self.url)" ]
[ "0.7508209", "0.6815543", "0.6162445", "0.6106849", "0.60844666", "0.6059696", "0.60429025", "0.60228574", "0.6012785", "0.59416395", "0.5828027", "0.5751404", "0.5730366", "0.57115114", "0.5708828", "0.5697832", "0.5697832", "0.5679159", "0.5660224", "0.5650022", "0.56475645", "0.5633808", "0.56066024", "0.55717516", "0.5559834", "0.55557215", "0.5546395", "0.5537633", "0.550518", "0.5502599", "0.5495279", "0.5472099", "0.5427615", "0.5415658", "0.54155874", "0.5406701", "0.5384905", "0.5383327", "0.53832996", "0.5364456", "0.5364456", "0.5359114", "0.5350965", "0.5342221", "0.53179646", "0.5313745", "0.5302041", "0.52920455", "0.5291424", "0.52910274", "0.52822644", "0.5281414", "0.52764297", "0.5268412", "0.5256984", "0.52445287", "0.52381164", "0.52306795", "0.52273506", "0.52151597", "0.5215149", "0.5211697", "0.5205741", "0.5205462", "0.5203711", "0.51941293", "0.51925075", "0.5170454", "0.5165073", "0.5160674", "0.5159792", "0.5153355", "0.5153355", "0.514826", "0.5120686", "0.5118211", "0.51130754", "0.511179", "0.51086456", "0.50946265", "0.50941306", "0.50940305", "0.5090391", "0.50863105", "0.5084", "0.50805545", "0.50712574", "0.5061879", "0.50570756", "0.5052846", "0.50489074", "0.5048899", "0.5031596", "0.50226235", "0.5005531", "0.50044614", "0.50006485", "0.49964172", "0.4993763", "0.49916634" ]
0.73618597
1
Create new blog Entry
def create():
    if request.method == 'POST':
        if request.form.get('title') and request.form.get('content'):
            entry = Entry.create(
                title = request.form.get('title'),
                content = request.form.get('content'),
                published = request.form.get('published') or False)
            flash('Entry created successfully!', 'success')
            if entry.published:
                return redirect(url_for('detail', slug=entry.slug))
            else:
                return redirect(url_for('edit', slug=entry.slug))
        else:
            flash('Title and Content are required!', 'danger')
    return render_template('create.html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createNewBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createNewEntryDocument()\r\n self._initNewEntryDocument(atomdoc)\r\n return ZAtomNewBlogEntry(atomdoc)", "def blog_create(request):\n entry = BlogRecord()\n form = BlogCreateForm(request.POST)\n if request.method == 'POST' and form.validate():\n form.populate_obj(entry)\n request.dbsession.add(entry)\n return HTTPFound(location=request.route_url('home'))\n return {'form': form, 'action': request.matchdict.get('action')}", "def newPost(self, postLink, zserverBlogEntry): #$NON-NLS-1$\r\n atomEntry = self.createNewBlogEntry()\r\n self._populateAtomEntry(atomEntry, zserverBlogEntry)\r\n # publish entry\r\n atomRespEntry = self.createAtomEntry(postLink, atomEntry)\r\n return atomRespEntry", "def new(request):\n\n if request.method == 'POST':\n data = request.POST\n form = BlogEntryForm(creator=request.user, data=data)\n if form.is_valid():\n form.save()\n # Should we redirect to single entry view or to all?\n return HttpResponseRedirect(reverse('blog.list_all'))\n else:\n form = BlogEntryForm(creator=request.user)\n\n data = {'form': form, 'blog_info': get_blog_info()}\n data.update(csrf(request))\n return render_to_response('blog/new_blog.html', data,\n context_instance=get_rq(request))", "def post(self):\n data = request.json\n return create_new_blog(data=data)", "def addBlogEntry(self, space, title, content = ''):\n return BlogEntry.create(self.pm_getSpaceManager().addBlogEntry(self._unbox(space), title, content), self._modelDataManager)", "def create(cls, headline, text, blog):\n post = cls()\n post.headline = headline\n post.text = text\n post.blog = blog\n post.posted_date = timezone.now()\n try:\n post.save()\n return post\n except(ValueError, IntegrityError, OperationalError):\n return None", "def post(self):\n\n title = self.request.get(\"title\")\n blogPost = self.request.get(\"blogPost\")\n author = self.request.cookies.get('name')\n\n if title and blogPost:\n\n bp = Blogposts(parent=blog_key(), title=title,\n blogPost=blogPost, author=check_secure_val(author))\n\n bp.put()\n\n self.redirect('/%s' % str(bp.key.integer_id()))\n else:\n error = \"Please submit both a title and a blogpost!\"\n self.render(\"newpost.html\", title=title,\n blogPost=blogPost, error=error)", "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def createEditBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createEditEntryDocument()\r\n self._initEditEntryDocument(atomdoc)\r\n return ZAtomEditBlogEntry(atomdoc)", "def create():\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO post (title, body, author_id)'\n ' VALUES (?, ?, ?)',\n (title, body, g.user['id'])\n )\n db.commit()\n return redirect(url_for('blog.index'))\n\n return render_template('blog/create.html')", "def post(self):\n data = request.json\n create_entry(data)\n return None, 201", "def post(self):\n title = self.request.get(\"title\")\n body = self.request.get(\"body\")\n\n if title and body:\n\n # create a new Post object and store it in the database\n post = Post(\n title=title,\n body=body\n )\n post.put()\n\n # get the id of the new post, so we can render the post's page (via the permalink)\n id = post.key().id()\n self.redirect(\"/blog/%s\" % id)\n else:\n error = \"we need both a title and a body!\"\n #self.render_form(title, body, error)\n 
self.render(\"newpost.html\", title, body, error)", "def new_blog(blog, template):\n path = '/'.join([POSTS, blog])\n with open(path, 'w') as blg:\n blg.write(template)", "def create(\n\t\trequest: schemas.Blog, db: Session = Depends(get_db),\n\t\tcurrent_user: schemas.User = Depends(oauth2.get_current_user)\n):\n\treturn blog.create(request, db)", "def getBlogEntry(self, id):\n return BlogEntry.create(self.pm_getSpaceManager().getBlogEntry(self._unbox(id)),self._modelDataManager)", "def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response", "def create_entry():\n new_entry = DB_Entry() # Create instance of entry to add the info to\n print('Eratosthenes is ready to add your new entry.\\n')\n new_entry.set_id()\n title = input('Enter the title:\\n')\n new_entry.set_title(title)\n authors = input('Enter the authors as list of surname, firstname separated by semicolons:\\n')\n new_entry.set_authors(authors)\n try:\n year = int(input('Enter the year:\\n'))\n except ValueError:\n try:\n year = int(input('Enter the year as an integer:\\n'))\n except ValueError:\n print('You failed to follow basic instructions. The year is set to 2000\\n')\n year = 2000\n new_entry.set_year(year)\n pub_type = input('Enter the publication type as article/review/book/other:\\n')\n try:\n new_entry.set_type(pub_type)\n except ValueError:\n try:\n pub_type = input('Type must be one of article/review/book/other:\\n')\n new_entry.set_type(pub_type)\n except ValueError:\n print('You failed to follow basic instructions. Type is now set to \\'other\\'\\n')\n pub_type = 'other'\n new_entry.set_type(pub_type)\n keywords = input('Enter list of keywords separated by semicolons:\\n')\n new_entry.set_keywords(keywords.split(';'))\n current_path = input('Enter the current path to the file\\n')\n current_path = current_path.replace('~', '/Users/marcus')\n if not os.path.isfile(current_path):\n print('File not found. 
Please try again')\n current_path = input('Enter the current path to the file\\n')\n if not os.path.isfile(current_path):\n print('File not found')\n new_entry.set_new_path()\n db_actions.copy_file(new_entry.get_path(), current_path)\n return new_entry", "def new_post(self, content):\n return self.proxy.wp.newPost(self.blog_id, self.username, self.password,\n content)", "def post(self):\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n # if user enter good subject and content, redirect them to new post page\n if subject and content:\n p = Post(parent = blog_key(), subject = subject, content = content)\n p.put() # store the post element into database\n self.redirect('/blog/%s' % str(p.key().id()))\n # otherwise, render an error page \n else:\n error = \"subject and content, please!\"\n self.render(\"newpost.html\", subject=subject, content=content, error=error)", "def new(request):\n assert isinstance(request, HttpRequest)\n if request.method == 'POST': # フォームが提出された\n form = EntryForm(request.POST) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n entry = form.save(commit=False)\n entry.member = request.user\n entry.save()\n return HttpResponseRedirect(reverse('entry_list')) # POST 後のリダイレクト\n else:\n form = EntryForm() # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n auth_form = AuthenticationForm(None, request.POST or None)\n return render(request, 'app/entry_edit.html', { \n 'form': form,\n 'title':'ブログ記事の新規登録',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'登録する',\n 'auth_form':auth_form,\n 'current_user':request.user,\n })", "def newPost(self, useRawHTML):\n print\n content, publish = self._fillPost(useRawHTML)\n\n # Upload to server\n try :\n postid = self.server.metaWeblog.newPost(\n self.blogid, self.username, self.password,\n content, publish\n )\n except xmlrpclib.Fault as fault:\n display_XMLRPC_errors(\"post the new entry\", fault)\n import pdb\n pdb.set_trace()\n else :\n self._setCategorie(postid)\n print \"New post created with ID =\", postid", "def add_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n introduction = request.form['introduction']\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n new_entry = Entries(title, category, buydate, introduction)\n db.session.add(new_entry)\n\n try :\n db.session.commit()\n except IntegrityError as e :\n flash(e.message)\n return redirect(url_for('show_entries_admin'))\n\n flash(u'成功添加新的条目')\n return redirect(url_for('show_entries_admin'))", "def post(self):\n\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n have_errors = False\n\n if not subject:\n error_subject = \"Please write down the subject\"\n have_errors = True\n if not content:\n error_content = \"Content is required\"\n have_errors = True\n\n if have_errors:\n self.render(\"newpost.html\",\n subject=subject,\n content=content,\n error_subject=error_subject,\n error_content=error_content,\n user=self.user)\n else:\n post = Post(parent=blog_key(),\n subject=subject,\n content=content,\n user=self.user)\n post.put()\n self.redirect('/blog/%s' % str(post.key().id()))", "def create_blog_post(user_id):\n \n data = request.get_json()\n\n # Check if the user is in the database\n user = 
User.query.filter_by(id=user_id).first()\n if not user:\n return jsonify({\"message\": \"user does not exist!\"}), 400\n\n # Create an instance of a HashTable\n ht = hash_table.HashTable(10)\n\n # Create a blog post\n ht.add_key_value(\"title\", data[\"title\"])\n ht.add_key_value(\"body\", data[\"body\"])\n ht.add_key_value(\"date\", now)\n ht.add_key_value(\"user_id\", user_id)\n\n # Add a blog post to the database\n new_blog_post = BlogPost(\n title=ht.get_value(\"title\"),\n body=ht.get_value(\"body\"),\n date=ht.get_value(\"date\"),\n user_id=ht.get_value(\"user_id\"),\n )\n db.session.add(new_blog_post)\n db.session.commit()\n return jsonify({\"message\": \"new blog post created\"}), 200", "def new_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(pub_date=datetime.date.today())\n post.title = form.title.data\n post.content = form.content.data\n post.slug = slugify(post.title)\n db.session.add(post)\n db.session.commit()\n return flask.redirect(flask.url_for(\n 'view_post',\n year=post.pub_date.year,\n month=post.pub_date.month,\n day=post.pub_date.day,\n slug=post.slug\n ))\n return flask.render_template('new.html', form=form)", "def add_entry():\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n cur = db.execute('insert into entries (title, ingredients, steps, \\\n tags, url) values (?, ?, ?, ?, ?)',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url']])\n db.commit()\n flash('Recipe, ' + escape(request.form['title'])\n + ', was successfully added', 'success')\n return view_entry(str(cur.lastrowid))\n else:\n return render_template('add_entry.html')", "def new_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n return False\n default_storage.save(filename, ContentFile(content))\n return True", "def main(blog, date):\n template = front_matter({\n \"title\": blog,\n \"date\": get_date(\"%Y-%m-%d %H:%M:%S %z\"),\n })\n new_blog(date + '-' + blog + '.markdown', template)", "def post(self, request):\n\n # crear el formulario con los datos del POST\n blog_with_user = Blog(owner=request.user)\n form = BlogForm(request.POST, instance=blog_with_user)\n\n if form.is_valid():\n #crea el post\n blog = form.save()\n\n #generar mensaje de exito\n msg = \"Blog creado con éxito\"\n\n # limpiamos el formulario creando uno vacío para pasar a la plantilla\n form = BlogForm()\n else:\n msg = \"Ha ocurrido un error al guardar el blog\" \\\n\n\n # renderiza la plantilla con el formulario\n context = {\n \"form\": form,\n \"msg\": msg\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/new-blog.html', context)", "def create_post():\n\n #Get prompt id\n prompt_id = request.form.get('prompt_id')\n\n # Get post text\n post_text = request.form.get('user_post')\n\n # Create post timestamp\n created_at = datetime.now()\n user_facing_date = created_at.strftime(\"%B %d, %Y\")\n\n # Save post and related data to database\n post = crud.create_post(session['user_id'], prompt_id, post_text, session['lat'], session['lng'], session['user_facing_location'], created_at)\n\n return render_template('post_data.html', post=post, user_facing_date=user_facing_date)", "def create(thing):\n fields = {}\n errors = []\n\n for col in thing.cols:\n new[col.field_name] = request.form.get(col.field_name)\n if col.required and not new[col.field_name]:\n errors.append('%s 
cannot be empty' % col.human_name)\n\n if errors:\n for e in errors:\n flash(e)\n add_template_variable('thing', thing)\n add_template_variable('fields', fields)\n return my_render_template('generic/create_post.html')\n\n # insert into database\n\n db = get_db()\n cursor = db.cursor()\n\n # create the two strings we use in the query\n field_names = \"'\" + \"', '\".join(thing.field_names) + \"'\"\n question_marks = \", \".join(map(lambda x: '?', thing.field_names.count() ))\n\n cursor.execute(\"insert into posts (%s) values (%s)\" % (field_names, question_marks), (title, body))\n db.commit()\n new_id = cursor.lastrowid\n\n # show new post to the user\n flash(\"You made a new %s\" % thing.human_name)\n return redirect(url_for('show_one', id_=new_id))", "def add_post(request):\n if 'form.submitted' in request.params:\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('new_post')\n post = Post('')\n return environment_factory(post=post, save_url=save_url)", "def post(self):\n post_title = self.request.get(\"post_title\")\n post_content = self.request.get(\"post_content\")\n param_list = dict(post_title=post_title, post_content=post_content)\n any_error = False\n\n if not post_title:\n param_list['title_error'] = \"Title is missing!\"\n any_error = True\n if not post_content:\n param_list['content_error'] = \"Content is missing!\"\n any_error = True\n\n if any_error:\n self.render(\"blog/addpost.html\", **param_list)\n else:\n p = Post.add_post(post_title, post_content, self.user)\n self.redirect('/blog/%s' % str(p.key().id()))", "def createDeleteBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createDeleteEntryDocument()\r\n self._initDeleteEntryDocument(atomdoc)\r\n return ZAtomDeleteBlogEntry(atomdoc)", "def test_create_view_adds_to_db(testapp):\n post_params = {\n 'title': 'Some Title.',\n 'body': 'Some Body.'\n }\n response = testapp.post('/journal/new-entry', post_params, status=302)\n full_response = response.follow()\n assert full_response.html.find(class_='entryListItem').a.text == post_params[\"title\"]", "def create_a_post():\n subj = create_subject()\n post = Post.create(subject=subj, title=\"A great title\", body=\"Just a great day!\")\n post.save()\n return post", "def createAtomEntry(self, postLink, atomNewEntry): #$NON-NLS-1$\r\n atomRequest = self._createNewEntryRequest(postLink, atomNewEntry)\r\n self._sendAtomEntry(atomRequest, atomNewEntry)\r\n atomEntry = atomRequest.getEntry()\r\n del atomRequest\r\n return atomEntry", "def post_entry(self, body, link=None, to=None, **args):\n args.update(body=body)\n if link: args.update(link=link)\n if to: args.update(to=to)\n return self.fetch(\"/entry\", post_args=args)", "def create_and_add_entry(self, **attrs):\n return self.add_entry(self.create_entry(**attrs))", "def add_new_posts(last_updated=None):\n for blog in Blog.objects.all():\n try:\n document = feedparser.parse(blog.feed_url)\n except:\n print \"error parsing\"\n continue\n\n if last_updated is None:\n print(\"- Adding %i articles from %s\" % (len(document['entries']), blog.title))\n\n for entry in document['entries']:\n # now we create a 
new post\n post = Post()\n post.blog = blog\n post.title = entry['title']\n\n if 'summary' in entry:\n post.content = entry['summary']\n if 'content' in entry:\n post.content = entry['content']\n\n post.link = entry['link']\n post.save()\n else:\n # TODO: only parse from a date\n pass", "def create_post(request):\n if request.method == 'POST':\n title = request.POST['title']\n content = request.POST['content']\n user_id = request.POST['author_id']\n category = request.POST['category']\n\n slug = \"-\".join(list(map(lambda word: word.lower(), title.split())))\n author = User.objects.get(id=int(user_id))\n\n # save info in models\n post = Post()\n post.author = author\n post.category = category\n post.title = title\n post.content = content\n post.slug = slug\n post.save()\n return redirect('post')\n\n return render(request, 'posts/create_post.html')", "def process_entry(entry, blog, START):\n try:\n when = entry['updated_parsed']\n except KeyError:\n try:\n when = entry['published_parsed']\n except KeyError:\n return # Ignore undateable posts\n\n if when:\n when = pytz.timezone('UTC').localize(datetime.fromtimestamp(time.mktime(when)))\n else:\n # print blog, entry\n return\n\n if when < START:\n return\n\n title = entry.get('title', \"Null\")\n\n try:\n author = entry['author']\n except KeyError:\n try:\n author = ', '.join(a['name'] for a in entry.get('authors', []))\n except KeyError:\n author = 'Anonymous'\n\n link = entry['link']\n\n try:\n body = entry['content'][0]['value'].replace(\"h1>\",\"b>\").replace(\"h2>\",\"b>\")\n except KeyError:\n body = entry['summary']\n\n return Post(when, blog, title, author, link, body)", "def create_post(category, author, name, content, status):\n return Post.objects.create(category=category, author=author, name=name, content=content, status=status)", "def create_tag_with_entry(title):\n tag = Tag.objects.create(title=title)\n tag.save()\n tag.entry.add(1)\n return tag", "def post(self, post_id=None):\n\n if post_id:\n abort(400)\n else:\n args = parsers.post_post_parser.parse_args(strict=True)\n\n new_post = Post(args['title'])\n new_post.text = args['text']\n # new_post.user = user\n\n if args['tags']:\n for item in args['tags']:\n tag = Tag.query.filter_by(name=item).first()\n # If the tag already exist, append.\n if tag:\n new_post.tags.append(tag)\n # If the tag not exist, create the new one.\n # Will be write into DB with session do.\n else:\n new_tag = Tag(item)\n new_post.tags.append(new_tag)\n db.session.add(new_post)\n db.session.commit()\n return (new_post.id, 201)", "def add_blog(self, text):\n self.blog.add_blog(text)\n self.refresh()", "def post(request, blog, urlname):\n\tif request.user.is_authenticated():\n\t\tblog = Blog.qa_objects.get(urlname=blog)\n\t\tpost = BlogEntry.qa_objects.get(blog=blog, urlname=urlname)\n\t\tposts = BlogEntry.qa_objects.filter(blog=blog).order_by('-posting_time')[:5]\n\t\tblogs = Blog.qa_objects.order_by('name')\n\telse:\n\t\tblog = Blog.objects.get(urlname=blog)\n\t\tpost = BlogEntry.objects.get(blog=blog, urlname=urlname)\n\t\tposts = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')[:5]\n\t\tblogs = Blog.objects.order_by('name')\n\tfyi = Article.objects.filter(news_type='FYI').order_by('-posting_time')[:5]\t\n\treturn render_to_response('blogs/post.html', {'blog': blog, 'post': post, 'posts': posts, 'fyi': fyi, 'blogs': blogs}, context_instance=RequestContext(request))", "def create_entry_with_tag(title, days):\n time = timezone.now() + timedelta(days=days)\n\n # create instance of entry with 
category\n entry = Entry.objects.create(title=title, pub_date=time, body='body')\n entry.save()\n entry.tag.add(1)\n return entry", "def insert_blog(self, title, subtitle, author, content):\n\n now = datetime.now()\n date = now.strftime(\"%B %d, %Y || %I:%M%p\")\n author_dict = self.get_author_by_name(author)\n if author_dict is not False:\n author_id = author_dict['author_id']\n\n cur = self.conn.cursor()\n\n query = ('INSERT INTO blog(title, subtitle, content, date, '\n ' author_id) '\n 'VALUES(?, ?, ?, ?, ?) ')\n cur.execute(query, (title, subtitle, content, date, author_id))\n self.conn.commit()\n return self.get_blog_by_id(cur.lastrowid)\n else:\n return False", "def test_create_new_entry_creates_new(db_session, dummy_request):\n from learning_journal.views.default import new_entry\n\n dummy_request.method = \"POST\"\n dummy_request.POST[\"title\"] = \"Learning Journal Title\"\n dummy_request.POST[\"body\"] = \"So many things learned today.\"\n\n new_entry(dummy_request)\n\n query = db_session.query(MyModel).all()\n assert query[0].title == \"Learning Journal Title\"\n assert query[0].body == \"So many things learned today.\"", "def create_post(bid):\n form = PostForm(request.form)\n if request.method == 'POST':\n if form.validate():\n DB.session.add(\n Post(\n bid,\n current_user.uid,\n form.name.data,\n form.desc.data))\n DB.session.commit()\n flash('Post ({}) successfully created!'.format(form.name.data))\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)", "def new_post(mkp_form, request):\n newpost = Posts()\n newpost.init()\n newpost.authorid = int(request.user.id)\n newpost.title = mkp_form.cleaned_data['title']\n newpost.name = mkp_form.cleaned_data['short_title'] # 缩略名\n newpost.cover = mkp_form.cleaned_data['cover_url']\n newpost.introduction = mkp_form.cleaned_data['introduction']\n newpost.content = js_resize_img(mkp_form.cleaned_data['content'])\n newpost.status = Status.objects.get(id=2) # id为2是已发布的文章,默认为已发布,后面再改\n tagids = mkp_form.cleaned_data['tags']\n if len(tagids) != 0:\n for tagid in tagids:\n tagid = int(tagid)\n tag = Tags.objects.get(id=tagid)\n newpost.tags.add(tag)\n threadtypeid = mkp_form.cleaned_data['threadtypeid']\n newpost.threadtypeid = ThreadTypes.objects.get(id=threadtypeid)\n if mkp_form.cleaned_data['commentnotshow'] != '':\n newpost.comment_status = False\n else:\n newpost.comment_status = True\n return newpost", "def post(self, request, pk):\n\n post = Blog.objects.get(pk=int(pk))\n user_id = self.request.session.get('USER_ID')\n\n try:\n user = User.objects.get(pk=user_id)\n except:\n pass\n body = self.request.POST.get('body')\n\n if user_id is None:\n messages.add_message(request, messages.ERROR, \"Please login to add comments.\")\n return HttpResponseRedirect(self)\n\n comments = Comment.objects.create(post=post, author=user, body=body)\n\n d = model_to_dict(post)\n messages.add_message(request, messages.SUCCESS, \"Comment added successfully.\")\n return self.render_to_response(d)", "def create_post():\r\n\r\n # Check for and reject empty username or whinge\r\n if not request.values.get(\"username\") or not request.values.get(\"whinge\"):\r\n print(\"Ignoring request to with empty username or whinge\")\r\n else:\r\n # Form data ok; add to DB\r\n con = get_db()\r\n con.execute(\"INSERT INTO posts (submitter,content,ts) VALUES (?,?,?);\",\r\n (\r\n request.values.get(\"username\"), # form field username -> DB column submitter\r\n request.values.get(\"whinge\"), # form field whinge -> DB column content\r\n 
time.time()\r\n )\r\n )\r\n con.commit()\r\n con.close()\r\n \r\n # TODO: Handle possibility of failed INSERT\r\n\r\n # Send them back to the main page\r\n return redirect(url_for(\"display_top\"))", "def init_new_entry(args, page=False):\n\n buildingfor = \"posts\"\n if (page):\n buildingfor = \"pages\"\n\n def _remove_temporary_entries(entries):\n result = {}\n for key, value in processed_entries.items():\n if (not \"_\" in key):\n result[key] = value\n\n return result\n\n def _get_new_entry(final_header):\n default_entry = \"---\\n\" + yaml.dump(final_header, allow_unicode=True,\n default_flow_style=False) + \"---\"\n return default_entry\n\n # Get configs\n user_config = configurator.get_config(os.path.join(args.src, paths.CFG_FILE))\n if (not user_config):\n logging.error(\"Error, could not find user config at {}\".format(\n os.path.join(args.src, paths.CFG_FILE)))\n return\n\n theme_headers = defaults.DEFAULT_THEME_HEADERS\n theme_headers_file = os.path.join(args.src, paths.THEMES_PATH,\n user_config[\"theme\"], paths.THEME_HEADERS_FILE)\n if (os.path.isfile(theme_headers_file)):\n tmp = configurator.get_yaml(theme_headers_file)\n # theme headers file might only define entries for posts/pages\n if (tmp[buildingfor]):\n theme_headers = tmp\n\n # Parse remainder (header content)\n processed_entries = _process_header_dict(theme_headers[buildingfor], args.header_content)\n final_entries = _remove_temporary_entries(processed_entries)\n\n # Generate entry file name from user / default template\n file_name = _get_new_entry_path(args, user_config, processed_entries, page)\n\n logging.debug(\"Creating new entry file at \" + file_name)\n\n with open(file_name, 'w+') as stream:\n stream.write(_get_new_entry(final_entries))\n\n logging.debug(\"Done creating entry.\")", "def createBlogView(request):\n data = {\n 'user': request.user,\n 'email': request.user.email,\n }\n\n bloggerForm = BloggerForm(initial=data)\n\n if request.method == \"POST\":\n bloggerForm = BloggerForm(request.POST, request.FILES)\n if bloggerForm.is_valid():\n bloggerForm.save()\n return redirect(\"home\")\n\n context = {\n \"bloggerForm\": bloggerForm,\n }\n\n return render(request, \"blog/create_blog.html\", context)", "def create_db_post(entry, keys, like):\n h = get_hash(entry['link'])\n collection = pos if like else neg\n return collection.update(\n {'hash': h},\n {\n 'link': entry['link'],\n 'title': entry['title'],\n 'published': '',\n 'content': \" \".join(keys),\n 'hash': h,\n 'read': False\n }, upsert=True\n )", "def create_post(user_id):\n\n user = User.query.get_or_404(user_id)\n title = request.form['title']\n content = request.form['content']\n tag_ids = [int(num) for num in request.form.getlist(\"tags\")]\n tags = Tag.query.filter(Tag.id.in_(tag_ids)).all()\n \n new_post = Post(title=title, content=content, user=user, tags=tags)\n db.session.add(new_post)\n db.session.commit()\n\n return redirect(f\"/users/{user_id}\")", "def _add_entry(self, cat_entry):\n\n # run through category apps and add orphans to Desktop\n # database, add DM and categories to database\n models.cat_apps(cat_entry)\n\n # run through and categories to database\n models.cat_list(cat_entry.categories)\n\n # create new - models.py \n cat_record = models.Categories(category=cat_entry.category) \n\n # fill in values \n cat_record.fill_record(cat_entry) \n\n BaseInfo.session.add(cat_record)\n\n try:\n BaseInfo.session.commit( )\n except exc.SQLAlchemyError:\n logger.error(\"Commit error\")", "def add_comment(request, entry_pk):\n\n blog = 
get_object_or_404(BlogEntry, pk=entry_pk)\n\n if not request.user.is_authenticated():\n raise PermissionDenied\n\n form = BlogCommentForm(creator=request.user, blog=blog, data=request.POST)\n\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(blog.get_absolute_url())\n\n return single(request, entry_pk=entry_pk, comment_form=form)", "def create_entry(self, entry_group_name, entry_id, entry):\n try:\n entry = self.__datacatalog.create_entry(parent=entry_group_name,\n entry_id=entry_id,\n entry=entry)\n self.__log_entry_operation('created', entry=entry)\n return entry\n except (exceptions.FailedPrecondition,\n exceptions.PermissionDenied) as e:\n entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)\n self.__log_entry_operation('was not created',\n entry_name=entry_name)\n raise e", "def new_post(request):\n if request.method != 'POST':\n # No data submitted; create a blank form.\n form = PostForm()\n else:\n # POST data submitted; process data.\n form = PostForm(data=request.POST)\n if form.is_valid():\n new_post = form.save(commit=False)\n new_post.owner = request.user\n new_post.save()\n return redirect('blogs:posts')\n\n # Display a blank or invalid form.\n context = {'form': form}\n return render(request, 'blogs/new_post.html', context)", "def test_blogpost_create_by_anonymous(self):\r\n user = self.create_users()[1]\r\n app = self.create_app(info=None)\r\n app.owner = user\r\n db.session.add_all([user, app])\r\n db.session.commit()\r\n url = \"/app/%s/new-blogpost\" % app.short_name\r\n\r\n res = self.app.get(url, follow_redirects=True)\r\n assert res.status_code == 200, res.status_code\r\n assert \"Please sign in to access this page\" in res.data, res\r\n\r\n res = self.app.post(url,\r\n data={'title':'blogpost title', 'body':'body'},\r\n follow_redirects=True)\r\n assert res.status_code == 200, res.status_code\r\n assert \"Please sign in to access this page\" in res.data\r\n\r\n blogpost = db.session.query(Blogpost).first()\r\n assert blogpost == None, blogpost", "def post():\n\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n details=form.details.data,\n remember=form.remember.data)\n return redirect(url_for('index'))\n return render_template('new.html', form=form)", "def add():\n if request.method == 'GET':\n return render_template('add.html')\n elif request.method == 'POST':\n data = {}\n for key in ('h', 'name', 'summary', 'content', 'published', 'updated', 'category',\n 'slug', 'location', 'in-reply-to', 'repost-of', 'syndication'):\n data[key] = None\n\n for title in request.form:\n data[title] = request.form[title]\n\n for title in request.files:\n data[title] = request.files[title].read()\n\n try:\n photo = request.files['photo']\n except:\n photo = None\n\n for key in data:\n if data[key] == \"\":\n data[key] = None\n\n data['published'] = datetime.now()\n\n location = create_entry(data, image=data['photo'], g=g)\n\n if data['in-reply-to']:\n send_mention('http://' + DOMAIN_NAME + '/e/'+location, data['in-reply-to'])\n\n if request.form.get('twitter'):\n t = Timer(30, bridgy_twitter, [location])\n t.start()\n\n if request.form.get('facebook'):\n t = Timer(30, bridgy_facebook, [location])\n t.start()\n return redirect(location)\n else:\n return redirect('/404'), 404", "def create():\r\n if request.method == 'POST':\r\n title = request.form['title']\r\n body = request.form['body']\r\n error = None\r\n db = get_db()\r\n cur = 
db.cursor()\r\n\r\n cur.execute('SELECT title FROM novel.post WHERE title = %s', title)\r\n newTitle = cur.fetchone()\r\n\r\n if not title:\r\n error = 'Title is required.'\r\n\r\n if newTitle and newTitle['title'] == title:\r\n error = 'Title is repeated.'\r\n\r\n if error is not None:\r\n flash(error)\r\n else:\r\n db = get_db()\r\n db.cursor().execute(\r\n 'INSERT INTO novel.post (title, body, author_id) VALUES (\"{0}\", \"{1}\", \"{2}\")'\r\n .format(title, body, g.user['id'])\r\n )\r\n db.commit()\r\n return redirect(url_for('novel.index'))\r\n\r\n return render_template('novel/create.html')", "def add_entry(title: str, datetime: pendulum.datetime) -> None:\n datetime = datetime.in_tz('UTC')\n conn = sqlite3.connect('rss.db')\n c = conn.cursor()\n c.execute(\n \"\"\"insert into entries values\n (?, ?, ?, ?)\"\"\",\n (title, datetime.year, datetime.month, datetime.day)\n )\n conn.commit()\n conn.close()", "def blog():\n return jsonify(\n {\n 'entries': [\n {\n 'title': 'A blog post about things',\n 'date': '7/3/12',\n 'id': 1,\n 'lead': \"\"\"once upon a time, there was a cool dude who did\n cool things. This is his story.\"\"\",\n 'body': \"More content for the blog post\",\n 'more_url': 'http://blog.tobywaite.net',\n },\n {\n 'title': 'Cool projects, ftw',\n 'date': '6/3/12',\n 'id': 2,\n 'lead': \"\"\"I did a really cool project once, this is all\n about it.\"\"\",\n 'body': \"More content for the blog post\",\n 'more_url': 'http://blog.tobywaite.net',\n },\n ]\n }\n )", "def add_post(content):\n db = psycopg2.connect(\"dbname=forum\")\n c = db.cursor()\n content = bleach.clean(content)\n c.execute(\"insert into posts values (%s)\", (content,))\n db.commit()\n db.close()\n # POSTS.append((content, datetime.datetime.now()))", "def test_creating_new_post(self):\n\n form_data = {\"meal-time\": \"2020-02-25 08:00:00\", \n \"meal-setting\": \"At home!\", \"TEB\": \"Some thoughts..\",\n \"hunger\": 2, \"fullness\": 8, \"satisfaction\": 5,\n \"meal-notes\": \"Some notes.\"}\n \n create_new_post(1, \"/static/images/uploads/2.jpg\", form_data)\n\n post = Post.query.get(3)\n\n self.assertIsInstance(post, Post)\n self.assertEqual(post.meal_setting, \"At home!\")", "def test_Entry_creation(self):\n test_entry = self.create_Entry()\n self.assertTrue(isinstance(test_entry, Entry))", "def test_blog_add():", "def community_post_create_view(request):\n task = \"Create New\"\n form = AddEditPostForm() # An unbound form\n\n if request.method == 'POST': # If the form has been submitted...\n form = AddEditPostForm(request.POST, request.FILES) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n post = form.save(commit=False) # Create a new object from the form, but don't save it to the database\n post.author = request.user # Set the author to the current user\n post.save() # Save the object to the database\n slug_str = \"%s %s\" % (post.title, post.date_posted) # Create a slug from the title and date\n post.slug = slugify(slug_str) # Create the slug\n post.save() # Save the object to the database\n return redirect('community-home') # Redirect to the home page\n\n context = { # Pass the variables to the template\n 'task': task,\n 'form': form,\n }\n return render(request,\n 'pages/patient-community/community-create-update-post.html',\n context) # render the patient community create post page", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n 
task_notes=notes\n )", "def post(self, request):\n\n # crear el formulario con los datos del post\n form = PostForm(request.POST)\n\n if form.is_valid():\n #crea el post\n post = form.save()\n\n #generar mensaje de exito\n msg = \"Post creado con éxito\"\n form = PostForm()\n else:\n msg = \"Ha ocurrido un error al guardar el post\" \\\n\n\n # renderiza la plantilla con el formulario\n context = {\n \"form\": form,\n \"msg\": msg\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/new-post.html', context)", "def create_entry_without_tag(title, days):\n time = timezone.now() + timedelta(days=days)\n\n # create entry without category\n entry = Entry.objects.create(title=title, pub_date=time, body='body')\n return entry", "def add_new_entry(self):\n clear_screen()\n new_entry = Entry.create()\n if new_entry is None:\n print(\"Add new entry cancelled. Returning to main menu...\")\n time.sleep(1)\n return None\n self.entries.append(new_entry)\n with open(self.file_name, \"a\") as file:\n writer = csv.writer(file)\n writer.writerow([new_entry.date, new_entry.name, new_entry.minutes, new_entry.note])", "def new(cls, handler):\n properties = dict(handler.request.params)\n\n xmlrpc_url = properties['xmlrpc_url']\n db.LinkProperty().validate(xmlrpc_url)\n\n if 'blog_id' not in properties:\n properties['blog_id'] = 0\n\n assert 'username' in properties\n assert 'password' in properties\n\n return WordPress.get_or_insert(xmlrpc_url, **properties)", "def updatePost(self, editLink, entryId, zserverBlogEntry): #$NON-NLS-1$\r\n atomEntry = self.createEditBlogEntry()\r\n atomEntry.setId(entryId)\r\n atomEntry.setEditLink(editLink)\r\n self._populateAtomEntry(atomEntry, zserverBlogEntry)\r\n # update entry\r\n atomRespEntry = self.updateAtomEntry(editLink, atomEntry)\r\n return atomRespEntry", "def single(request, entry_pk=None, comment_form=None):\n\n entry = get_object_or_404(BlogEntry, pk=entry_pk)\n\n if not comment_form:\n comment_form = BlogCommentForm(creator=request.user, blog=entry)\n\n data = {'entry': entry, 'blog_info': get_blog_info(),\n 'comment_form': comment_form}\n return render_to_response('blog/single.html', data,\n context_instance=get_rq(request))", "def insert_new_post(post_arg_set):\n api, post_data, acct_data, page_id, config = post_arg_set\n\n try:\n post_id = post_data['id'] if post_data.has_key('id') else None\n\n except Exception as e:\n log.error( e )\n\n else:\n\n # parse date\n if post_data.has_key('created_time') and post_data['created_time'] is not None: \n dt = datetime.strptime(post_data['created_time'], FB_DATE_FORMAT)\n date_time = tz_adj(dt, config)\n time_bucket = round_datetime(date_time, config)\n raw_timestamp = int(date_time.strftime(\"%s\"))\n \n else:\n time_bucket = None\n raw_timestamp = None\n \n # extract message so we can find links within the msg if not in url\n article_urls = [get_fb_link(post_data, config, unshorten=True)]\n message = post_data['message'].encode('utf-8') if post_data.has_key('message') else None\n message_urls = get_message_urls(article_urls, message, config)\n\n # detect article links, unshorten and parse\n article_urls = [\n parse_url(unshorten_link(url, config)) \\\n for url in article_urls + message_urls\n if url is not None\n ]\n\n article_urls = [url for url in article_urls if is_article(url, config)]\n\n if article_urls:\n for article_url in set(article_urls):\n\n # sluggify url\n article_slug = sluggify(article_url)\n\n # format data\n post_value = {\n 'article_slug': article_slug,\n 'article_url': 
article_url,\n 'time_bucket': time_bucket,\n 'fb_post_created': raw_timestamp,\n 'raw_timestamp': raw_timestamp,\n 'fb_raw_link' : get_fb_link(post_data, config=config),\n 'fb_page_id': page_id,\n 'fb_post_id': post_id,\n 'fb_page_likes': acct_data['likes'] if acct_data.has_key('likes') else None,\n 'fb_page_talking_about': acct_data['talking_about_count'] if acct_data.has_key('talking_about_count') else None,\n 'fb_type': post_data['type'] if post_data.has_key('type') else None,\n 'fb_status_type': post_data['status_type'] if post_data.has_key('status_type') else None,\n 'fb_message': message\n }\n \n # always insert insights data\n if is_insights(page_id, config):\n \n log.info( \"INSIGHTS\\tAdding data from %s re: %s\" % (page_id, article_slug) )\n\n # fetch data\n insights_value = get_insights_data(api, page_id, post_id)\n\n # create datasource name\n data_source = \"facebook_insights_%s\" % page_id \n \n # upsert url\n upsert_url(article_url, article_slug, data_source, config)\n\n # insert id\n db.sadd('facebook_post_ids', post_id)\n\n # format time bucket\n current_time_bucket = gen_time_bucket(config)\n insights_value['time_bucket'] = current_time_bucket\n post_value.pop('time_bucket', None)\n \n value = json.dumps({\n data_source : dict(post_value.items() + insights_value.items())\n })\n\n # upload data to redis\n db.zadd(article_slug, current_time_bucket, value) \n \n # only insert new posts\n if not db.sismember('facebook_post_ids', post_id):\n \n log.info( \"FACEBOOK\\tNew post %s\\t%s\" % (post_id, article_url) )\n \n # insert id\n db.sadd('facebook_post_ids', post_id) \n \n # upsert url\n data_source = \"facebook_%s\" % page_id\n upsert_url(article_url, article_slug, data_source, config)\n\n value = json.dumps( {data_source : post_value} )\n\n\n # upload data to redis\n db.zadd(article_slug, time_bucket, value)", "def addPost(self,text,id,url,date):\n self.topComments.append(Post(text,id,url,date))\n return None", "def post(self):\n post_id = self.request.get('post_id')\n post = Post.get_by_id(int(post_id), parent=blog_key())\n content = self.request.get('comment')\n\n if content:\n comment = Comment(parent=comment_key(),\n content=content,\n user=self.user,\n post=post)\n comment.put()\n\n time.sleep(0.1)\n self.redirect('/blog/%s' % str(post.key().id()))", "def blog_view(request):\n blog_id = int(request.matchdict.get('id', -1))\n entry = BlogRecordService.by_id(blog_id, request)\n if not entry:\n return HTTPNotFound\n return {'entry': entry}", "def add_new_post(user_id):\n\n title = request.form.get('title')\n content = request.form.get('content')\n\n new_post = Post(\n title=title, content=content, created_at='11-11-2011', user_id=user_id)\n\n db.session.add(new_post)\n db.session.commit()\n flash(f'New post added: {title}')\n\n return redirect(f'/users/{user_id}')", "def view_blog(self):", "def test_form_create(self):\n create = {\n 'title': 'Last Post (Final)',\n 'content': '### Goodbye!',\n 'is_published': False,\n }\n\n form = self.form_cls(create)\n print(form.errors)\n\n form.save()\n\n actual = models.Entry.objects.get(slug='last-post-final')\n self.assertEquals(actual.title, create['title'])\n self.assertEquals(actual.content.raw, create['content'])\n self.assertIsNone(actual.published_timestamp)", "def createcomment(request, pk):\n issue = get_object_or_404(Issue, pk=pk)\n if request.method == \"POST\":\n form = CommentCreationForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.issue = issue\n comment.author = request.user\n 
comment.created_at = timezone.now()\n comment.save()\n return redirect('office:issue', pk=pk)\n else:\n form = CommentForm()\n return render(request, 'blog/add_comment_to_post.html', {'form': form})", "def create_post(request):\n\n # modified from: http://django-angular.readthedocs.org/en/latest/angular-model-form.html\n\n # get data\n in_data = getRequestData(request)\n\n try:\n # save in database\n # note that in_data.mytitle throws an error while in_data.get('mytitle') works smoothly\n post = Thread(pub_date = datetime.datetime.now(pytz.timezone('US/Eastern')), username = in_data.get('myusername'), title = in_data.get('mytitle'), description = in_data.get('mydescription'))\n post.save()\n except:\n return HttpResponseBadRequest('Error saving to database!')\n\n return JsonResponse(in_data)", "def create_entry_for_topic(cls, topic, entry_id, content_hash):\n\t\tkey = cls.create_key(topic, entry_id)\n\t\treturn cls(key_name=key.name(),\n\t\t\t\t\t\t\t parent=key.parent(),\n\t\t\t\t\t\t\t entry_id=entry_id,\n\t\t\t\t\t\t\t entry_id_hash=utils.sha1_hash(entry_id),\n\t\t\t\t\t\t\t entry_content_hash=content_hash)", "def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict", "def add_entry(self, *args, **kwargs):\n entry = Entry(*args, **kwargs) # NOTE: not sure this is good\n self._entries[entry.uuid] = entry\n return entry", "def __add_entries(entries, feed):\n\n for entry in entries:\n try:\n # If there is entry with such title in this feed\n Entry.objects.get(title=entry.title, feed=feed)\n continue\n except Entry.DoesNotExist:\n pass\n\n # Try to find another entries with such title\n e = Entry.objects.filter(title=entry.title)\n # If found\n if len(e) != 0:\n e = e[0]\n # Copy all containing\n entry_obj = Entry(title=e.title,\n description=e.description,\n entry=e.entry, feed=feed)\n entry_obj.save()\n # Or create new Entry from scratch\n else:\n entry_name = entry.title + '.html'\n # If bad link or entry name\n try:\n urlretrieve(entry.link, entry_name)\n\n entry_file = open(entry_name)\n entry_file = File(entry_file)\n\n entry_obj = Entry(title=entry.title,\n description=entry.description,\n entry=entry_file, feed=feed)\n entry_obj.save()\n\n os.remove(entry_name)\n except:\n # Go to next entry\n continue", "def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n\n if serializer.is_valid():\n data = serializer.data\n Article.objects.create_article(url=data['url'],\n authors=data['authors'],\n publish_time=data['publish_time'],\n title_image=data['images'])\n return Response({'message': 'News story added.'})\n else:\n return Response({'error': serializer.errors})", "def admincreate(object):\n if request.method 
== \"POST\":\n\n db = get_db()\n execute_string = 'INSERT INTO ' + object.title()\n\n if object == 'post':\n execute_string += '(title, content, authorId, categoryId) VALUES (\"' + request.form['title'] + '\", \"' + request.form[\"content\"] + '\", \"' + request.form[\"authorid\"] + '\", \"' + request.form[\"categoryid\"] + '\")'\n elif object == 'author':\n execute_string += '(name) VALUES (\"' + request.form['name'] + '\")'\n elif object == 'category':\n execute_string += '(name, description) VALUES (\"' + request.form['name'] + '\", \"' + request.form[\"description\"] + '\")'\n\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n return render_template(\"new.html\", object=object, item={})", "def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)", "def create_entry(cls, title, date, timeSpent, learned, resources):\n try:\n with DATABASE.transaction():\n cls.create(\n title=title,\n date=date,\n timeSpent=timeSpent,\n learned=learned,\n resources=resources\n )\n except IntegrityError:\n raise ValueError(\"Entry already exists\")", "def add_post(user_id):\n\n title = request.form['title']\n content = request.form['content']\n tags = request.form.getlist('tag')\n user = User.query.get_or_404(user_id)\n\n if not title or not content:\n flash(\"Please enter title and content.\")\n return redirect(f\"/users/{user.id}/posts/new\")\n\n post = Post(title=title, content=content, user=user)\n\n if tags:\n for tag in tags:\n post.tags.append(Tag.query.filter(Tag.name==tag).one())\n\n db.session.add(post)\n db.session.commit()\n\n user = User.query.get_or_404(user_id)\n\n return redirect(f\"/users/{user_id}\")", "def _post_entry_to_model(self, entry):\n return RedditPost({\n \"id\" : entry.post_id,\n \"subreddit\" : entry.subreddit.name,\n \"author\" : entry.author,\n \"author_premium\" : entry.author_premium,\n \"subreddit_subscribers\" : entry.subreddit_subscribers,\n \"title\" : entry.title,\n \"downs\" : entry.downs,\n \"ups\" : entry.ups,\n \"selftext\" : entry.selftext,\n \"num_comments\" : entry.num_comments,\n \"total_awards_received\" : entry.total_awards_received,\n \"view_count\" : entry.view_count,\n \"permalink\" : entry.permalink,\n \"url\" : entry.url,\n \"created\" : entry.created,\n \"created_utc\" : entry.created_utc,\n })" ]
[ "0.8232406", "0.7668556", "0.7641279", "0.7607913", "0.73694056", "0.7322824", "0.73227113", "0.7250803", "0.7247709", "0.72167945", "0.7215931", "0.6994082", "0.69597936", "0.69173276", "0.68832356", "0.6816936", "0.68142015", "0.67737234", "0.6718318", "0.670351", "0.66975707", "0.6672287", "0.66480535", "0.66211003", "0.6609822", "0.65995824", "0.6567572", "0.6552912", "0.65434533", "0.6512855", "0.65103626", "0.64918727", "0.64737314", "0.64537597", "0.6433041", "0.64178467", "0.6383161", "0.6354531", "0.6351907", "0.63429433", "0.63394064", "0.6336184", "0.6333915", "0.6332487", "0.6332229", "0.6331671", "0.63063705", "0.6304359", "0.62947655", "0.6289036", "0.6272694", "0.6267557", "0.6267053", "0.6265524", "0.62567043", "0.6235287", "0.62213725", "0.61761475", "0.616206", "0.614484", "0.6115828", "0.6111033", "0.61051553", "0.60913104", "0.6091232", "0.60888237", "0.6084437", "0.6082233", "0.60745007", "0.607417", "0.6073534", "0.60554355", "0.60164624", "0.60026044", "0.599182", "0.5990391", "0.59856945", "0.5985608", "0.5981977", "0.5978794", "0.5970021", "0.5969152", "0.59632796", "0.5960684", "0.59429604", "0.59427214", "0.5937715", "0.59326535", "0.5930613", "0.5916968", "0.590812", "0.59047776", "0.5902293", "0.58957183", "0.589418", "0.5881389", "0.587199", "0.5871055", "0.5866197", "0.5865693" ]
0.77166563
1
Edit existing blog Entry
def edit(slug):
    entry = get_object_or_404(Entry, Entry.slug == slug)
    if request.method == 'POST':
        if request.form.get('title'):
            entry.title = request.form.get('title')
        if request.form.get('content'):
            entry.content = request.form.get('content')
        entry.published = request.form.get('published') or False
        entry.save()

        flash('Entry saved successfully!', 'success')
        if entry.published:
            return redirect(url_for('detail', slug=entry.slug))
        else:
            return redirect(url_for('edit', slug=entry.slug))

    return render_template('edit.html', entry=entry)
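A note for readers of this record: the document above is a Flask view function. The sketch below shows, purely for illustration, how such a view is typically wired into a small Flask + peewee app; the route, the Entry model fields, the secret key, and the assumption that get_object_or_404 comes from playhouse.flask_utils (or an equivalent helper) are guesses about typical usage, not part of this dataset entry.

from flask import Flask
from peewee import BooleanField, CharField, Model, SqliteDatabase, TextField

db = SqliteDatabase('blog.db')
app = Flask(__name__)
app.secret_key = 'dev'  # assumption: flash() needs a secret key configured

class Entry(Model):
    # Fields implied by the view above: title, slug, content, published.
    title = CharField()
    slug = CharField(unique=True)
    content = TextField()
    published = BooleanField(default=False)

    class Meta:
        database = db

# Typical registration of the edit() view shown in the document above:
# app.add_url_rule('/<slug>/edit/', view_func=edit, methods=['GET', 'POST'])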
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updatePost(self, editLink, entryId, zserverBlogEntry): #$NON-NLS-1$\r\n atomEntry = self.createEditBlogEntry()\r\n atomEntry.setId(entryId)\r\n atomEntry.setEditLink(editLink)\r\n self._populateAtomEntry(atomEntry, zserverBlogEntry)\r\n # update entry\r\n atomRespEntry = self.updateAtomEntry(editLink, atomEntry)\r\n return atomRespEntry", "def edit_entry(self, id, body=None, link=None, **args):\n args.update(id=id)\n if body: args.update(body=body)\n if link: args.update(link=link)\n return self.fetch(\"/entry\", post_args=args)", "def blogEdit(request, urlname):\n\tblog = Blog.objects.get(authors=request.user, urlname=urlname)\n\tsections = [(s.pk, s.name) for s in blog.blogsection_set.all()]\n\teditor = request.user in blog.editors.all()\n\tif request.method == 'POST':\n\t\tform = BlogForm(request.POST, sections=sections)\n\t\tif form.is_valid():\n\t\t\tdata = form.cleaned_data\n\t\t\tif data['urlname']:\n\t\t\t\tpost = BlogEntry.qa_objects.get(blog=blog, urlname=data['urlname'])\n\t\t\t\tpost.title = data['title']\n\t\t\t\tpost.byline = data['byline']\n\t\t\t\tpost.byline_link = data['byline_link']\n\t\t\t\tpost.reviewed_by = data['reviewed_by']\n\t\t\t\tpost.reviewed_by_link = data['reviewed_by_link']\n\t\t\t\tpost.body = data['body']\n\t\t\t\tfor section in data['sections']:\n\t\t\t\t\tsection_model = BlogSection.objects.get(pk=section)\n\t\t\t\t\texisting = BlogEntrySection.qa_objects.filter(entry=post, section=section_model)\n\t\t\t\t\tif not existing:\n\t\t\t\t\t\tnew = BlogEntrySection(entry=post, section=section_model)\n\t\t\t\t\t\tpost.blogentrysection_set.add(new)\n\t\t\telse:\n\t\t\t\tpost = BlogEntry(for_update=2, blog=blog, urlname=None, author=request.user, title=data['title'], posting_time=data['posting_time'], display_time=data['display_time'], byline=data['byline'], byline_link=data['byline_link'], reviewed_by=data['reviewed_by'], reviewed_by_link=data['reviewed_by_link'], body=data['body'])\n\t\t\tpost.save()\n\t\t\treturn render_to_response('blogs/admin/blog_admin.html', {'user': request.user, 'editor': editor, 'blog': blog}, context_instance=RequestContext(request))\n\t\telse:\n\t\t\treturn render_to_response('blogs/admin/blog_edit.html', {'form': form, 'user': request.user, 'editor': editor, 'blog': blog}, context_instance=RequestContext(request))\n\telse:\n\t\tif request.GET.get('entry'):\n\t\t\tentry = request.GET['entry']\n\t\t\tpost = BlogEntry.qa_objects.get(blog=blog, urlname=entry)\n\t\t\tinitial_sections = [s.pk for s in post.blogentrysection_set.all()]\n\t\t\tform = BlogForm(sections=sections, initial={'posting_time': post.posting_time, 'display_time': post.display_time, 'urlname': entry, 'title': post.title, 'byline': post.byline, 'byline_link': post.byline_link, 'reviewed_by': post.reviewed_by, 'reviewed_by_link': post.reviewed_by_link, 'body': post.body, 'sections': initial_sections})\n\t\telse:\n\t\t\tform = BlogForm(sections=sections)\n\t\treturn render_to_response('blogs/admin/blog_edit.html', {'form': form, 'user': request.user, 'editor': editor, 'blog': blog}, context_instance=RequestContext(request))", "def createEditBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createEditEntryDocument()\r\n self._initEditEntryDocument(atomdoc)\r\n return ZAtomEditBlogEntry(atomdoc)", "def edit_post(post_id):\n\n if request.method == \"GET\":\n the_post = mongo.db.blog.find_one({'_id': ObjectId(post_id)})\n return render_template(\"edit.html\", page_title=\"Edit Blog\", post=the_post)\n\n elif request.method == \"POST\":\n blog_post = mongo.db.blog\n now 
= datetime.now()\n blog_post.replace_one({'_id': ObjectId(post_id)},\n {\n 'title': request.form['blog_title'],\n 'blog': request.form['blog_info'],\n 'image': request.form['image_url'],\n 'created': request.form['created'],\n 'updated': now.strftime(\"%d/%m/%Y %H:%M:%S\"),\n })\n return redirect(url_for('blog.home'))", "def blog_update(request):\n blog_id = int(request.params.get('id', -1))\n entry = BlogRecordService.by_id(blog_id, request)\n if not entry:\n return HTTPNotFound()\n form = BlogUpdateForm(request.POST, entry)\n if request.method == 'POST' and form.validate():\n del form.id # SECURITY: prevent overwriting of primary key\n form.populate_obj(entry)\n return HTTPFound(\n location=request.route_url('blog', id=entry.id,slug=entry.slug))\n return {'form': form, 'action': request.matchdict.get('action')}", "def edit_entry(id):\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n db.execute('update entries set title = ?, ingredients = ?, \\\n steps = ?, tags = ?, url = ? where id = ?',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url'], request.form['id']])\n db.commit()\n flash('Entry ' + id + ' has been modified.', 'success')\n return view_entry(str(id))\n else:\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, \\\n url from entries where id = ? order by id desc',\n [id.strip()])\n entries = cur.fetchall()\n return render_template('edit_entry.html', entries=entries)", "def edit_entry(entry_id):\n\n\tif not session.get('logged_in'):\n\t\tabout(401)\n\n\tquery = 'UPDATE entries SET text=\"%s\" WHERE id=\"%s\"' % (\n\t\trequest.form['text'], str(entry_id))\n\tg.db.execute(query)\n\tg.db.commit()\n\tflash(\"Entry Edited\")\n\n\treturn redirect(url_for('show_entries'))", "def edit_post(year, month, day, slug):\n post = Post.query.filter_by(slug=slug, pub_date=datetime.date(year, month, day)).first()\n form = PostForm(title=post.title, content=post.content)\n if form.validate_on_submit():\n post.title = form.title.data\n post.content = form.content.data\n db.session.add(post)\n db.session.commit()\n return flask.redirect(flask.url_for(\n 'view_post',\n year=post.pub_date.year,\n month=post.pub_date.month,\n day=post.pub_date.day,\n slug=post.slug\n ))\n return flask.render_template('edit.html', post=post, form=form)", "def edit_post(post_id):\n\n post_data = {\"id\": post_id}\n db_post = Post.query.get_or_404(post_id)\n post_data[\"title\"] = db_post.title\n post_data[\"content\"] = db_post.content\n post_data[\"user_id\"] = db_post.user_id\n\n return render_template(\"edit_post.html\", headline=\"Add New Blogly User\", post=post_data)", "def edit_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n ssid = decrypt_book_record(request.form['ssid'])\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n edited_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if edited_entry is not None :\n edited_entry.introduction = request.form['introduction']\n if db.session.is_modified(edited_entry) :\n # commit only if something is modified\n try :\n db.session.commit()\n except IntegrityError as e :\n log_error('error when edit:')\n log_error(e.message)\n flash(u'数据库操作失败导致更新失败!请看后台日志')\n flash(u'成功更新条目')\n\n return 
redirect(url_for('show_entries_admin'))", "def edit_post(post_id):\n\n post = Post.query.get_or_404(post_id)\n\n title = request.form[\"title\"]\n content = request.form[\"content\"]\n tags = request.form.getlist(\"tag\")\n post.tags = []\n if tags:\n for tag in tags:\n post.tags.append(Tag.query.filter(Tag.name==tag).one())\n\n if not title or not content:\n flash(\"Please enter a title and content\")\n return redirect(f\"/posts/{post.id}/edit\")\n\n post.title = title\n post.content = content\n db.session.add(post) \n db.session.commit()\n\n return redirect(f\"/posts/{post_id}\")", "def edit(request,entry_id):\n assert isinstance(request, HttpRequest)\n try:\n entry = Entry.objects.get(pk=entry_id)\n except Entry.DoesNotExist:\n raise Http404(\"指定されたブログが存在しません。\")\n if not request.user or request.user.pk != entry.member.pk: # ブログ作成者以外は編集できない\n return HttpResponseForbidden() #アドレスをコピペしなければ通常は起こらないため例外処理で済ませておく。\n\n if request.method == 'POST': # フォームが提出された\n form = EntryForm(request.POST, instance = entry) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n form.save()\n return HttpResponseRedirect(reverse('entry_list')) # POST 後のリダイレクト\n else:\n form = EntryForm(instance = entry) # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n return render(request, 'app/entry_edit.html', { \n 'form': form,\n 'title':'ブログ記事の編集',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'更新',\n 'entry_pk':entry.pk,\n 'current_user':request.user,\n })", "def edit_notes(entry):\n entry.notes = get_notes()\n entry.save()\n input(\"Edit successful. \")\n return entry", "def editPost(self, id, useRawHTML):\n old_data = self._extractPost(id)\n print\n content, publish = self._fillPost(useRawHTML, old_data)\n\n # Upload to server\n try :\n self.server.metaWeblog.editPost(\n id, self.username, self.password,\n content, publish\n )\n if raw_input(\"Change category ?[y|N] \") == \"y\" :\n self._setCategorie(id)\n except xmlrpclib.Fault as fault:\n display_XMLRPC_errors(\"edit entry\", fault)", "def edit_date(entry):\n entry.date = get_date()\n entry.save()\n input(\"Edit successful. \")\n return entry", "def edit_post(self, postid, newpost, publish=True):\n return self.execute('metaWeblog.editPost', postid, self.username, self.password, newpost, publish)", "def edit():", "def edit_article(article_id):\n \n if 'username' in session: \n article = mongo.db.articles.find_one_or_404(\n {'_id': ObjectId(article_id)})\n form=BlogForm()\n form.title.data = article['title']\n form.content.data = article['content']\n return render_template('pages/editarticle.html',\n form=form, \n article=article, \n legend='Edit your Blog Article'\n )", "def handle_edit_post(post_id):\n edited_post = Post.query.get_or_404(post_id)\n\n edited_post.title = request.form['post-title']\n edited_post.content = request.form['post-content']\n\n db.session.add(edited_post)\n db.session.commit()\n\n return redirect(f\"/users/{edited_post.user_id}\")", "def update(id):\n\tpost = get_post(id)\n\n\tif request.method == 'POST':\n\t\ttitle = request.form['title']\n\t\tbody = request.form['body']\n\t\terror = None\n\n\t\tif not title:\n\t\t\terror = 'Title is required.'\n\n\t\tif error is not None:\n\t\t\tflash(error)\n\t\telse:\n\t\t\tdb = get_db()\n\t\t\tdb.execute(\n\t\t\t\t'UPDATE post SET title = ?, body = ? 
WHERE id = ?',\n\t\t\t\t(title, body, id)\n\t\t\t)\n\t\t\tdb.commit()\n\t\t\treturn redirect(url_for('blog.index'))\n\n\treturn render_template('blog/update.html', post=post)", "def edit_post(request, slug):\n post = Post.objects.get(slug=slug)\n # import pdb; pdb.set_trace()\n if request.method == 'POST':\n post.title = request.POST['title']\n post.content = request.POST['content']\n post.save()\n return redirect('post')\n\n return render(request, 'posts/edit.html', {'post': post})", "def update(\n\t\tblog_id, request: schemas.Blog, db: Session = Depends(get_db),\n\t\tcurrent_user: schemas.User = Depends(oauth2.get_current_user)\n):\n\treturn blog.update(blog_id, request, db)", "def update(id):\n post = get_post(id)\n\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'UPDATE post SET title = ?, body = ? WHERE id = ?',\n (title, body, id)\n )\n db.commit()\n return redirect(url_for('blog.thread', id=id))\n\n return render_template('blog/update.html', post=post)", "def process_post_edit(user_id, post_id):\n\n title = request.form.get('title')\n content = request.form.get('content')\n\n post = Post.query.get_or_404(post_id)\n\n post.title = title\n post.content = content\n\n db.session.add(post)\n db.session.commit()\n\n return redirect(f'/users/{user_id}/posts/{post_id}')", "def update_entry(entry_id):\n\n entry = Entry.query.get(entry_id)\n user_id = session.get(\"user_id\")\n print(entry)\n # if user_id != entry.user.user_id:\n # return redirect(\"/\")\n\n # grabs information for the form\n user_mood = request.form.get(\"mood\")\n if user_mood is None:\n pass\n else:\n mood = Mood.query.get(int(user_mood))\n entry.mood = mood\n\n user_activities = request.form.getlist(\"activity_category\")\n\n description = request.form.get(\"description\")\n if description is None and entry.description is None:\n pass\n elif description and entry.description:\n entry.description = description\n elif description and entry.description is None:\n entry.description = description\n\n form_activities = []\n for activity_id in user_activities:\n form_activities.append(Activity_Category.query.get(int(activity_id)))\n\n activities = entry.activities\n\n entry.activities.extend(form_activities)\n\n db.session.commit()\n\n flash(\"You have successfully updated an entry!\")\n\n # return redirect(f\"/all-entries/{user_id}\")\n return redirect(f\"/update-entry/{entry.entry_id}\")", "def wordpress_edit_page(post_id, title, content):\n server = ServerProxy(os.environ['WORDPRESS_RPC_URL'])\n return server.wp.editPost(os.environ['WORDPRESS_BLOG_ID'],\n os.environ['WORDPRESS_USERNAME'],\n os.environ['WORDPRESS_PASSWORD'],\n post_id,\n {\n 'post_content': content,\n 'post_title': title,\n })", "def edit_entry(request, entry_id):\n entry= Entry.objects.get(id= entry_id)\n stock= entry.stock\n\n if request.method != 'POST':\n #initial request; pre-fill form with the current note entry.\n form= EntryForm(instance=entry)\n else:\n # POST data submitted; process data.\n form= EntryForm(instance=entry, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('stock_trackers:stock', stock_id=stock.id)\n\n context= {'entry': entry, 'stock': stock, 'form': form}\n return render(request, 'stock_trackers/edit_entry.html', context)", "def edit_post(request, post_id):\n post = Post.objects.get(id=post_id)\n check_post_owner(request, 
post)\n\n if request.method != 'POST':\n # Initial request; pre-fill form with the current entry.\n form = PostForm(instance=post)\n else:\n # POST data submitted; process data.\n form = PostForm(instance=post, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('blogs:post', post_id=post.id)\n\n context = {'post': post, 'form': form}\n return render(request, 'blogs/edit_post.html', context)", "def post(self, post_id):\n post = Post.by_id(int(post_id))\n post_title = self.request.get(\"post_title\")\n post_content = self.request.get(\"post_content\")\n param_list = dict(post=post, post_title=post_title,\n post_content=post_content)\n any_error = False\n\n if not post_title:\n param_list['title_error'] = \"Title is missing!\"\n any_error = True\n if not post_content:\n param_list['content_error'] = \"Content is missing!\"\n any_error = True\n\n if any_error:\n self.render(\"blog/editpost.html\", **param_list)\n else:\n p = Post.update_post(int(post_id), post_title, post_content)\n self.redirect('/blog/%s' % str(p.get_id()))", "def edit(self, **kwargs):\n ...", "def edit(year, month, day, name):\n if request.method == \"GET\":\n try:\n file_name = \"data/{year}/{month}/{day}/{name}\".format(year=year, month=month, day=day, name=name)\n entry = get_bare_file(file_name+\".md\")\n return render_template('edit_entry.html', entry=entry)\n except:\n return render_template('page_not_found.html')\n\n elif request.method == \"POST\":\n data = {}\n for key in ('h', 'name', 'summary', 'content', 'published', 'updated', 'category',\n 'slug', 'location', 'in-reply-to', 'repost-of', 'syndication'):\n data[key] = None\n\n for title in request.form:\n data[title] = request.form[title]\n\n for title in request.files:\n data[title] = request.files[title].read()\n\n for key in data:\n if data[key] == \"\":\n data[key] = None\n\n location = \"{year}/{month}/{day}/{name}\".format(year=year, month=month, day=day, name=name)\n\n if request.form.get('twitter'):\n t = Timer(30, bridgy_twitter, [location])\n t.start()\n\n if request.form.get('facebook'):\n t = Timer(30, bridgy_facebook, [location])\n t.start()\n file_name = \"data/{year}/{month}/{day}/{name}\".format(year=year, month=month, day=day, name=name)\n entry = get_bare_file(file_name+\".md\")\n location = editEntry(data, old_entry=entry, g=g)\n return redirect(location)", "def update(entry_id):\n entry = models.Journal.select().where(\n models.Journal.id == entry_id).get()\n form = forms.JournalForm() # if the form validates\n if form.validate_on_submit(): # if click update button\n entry.title = form.title.data\n entry.date = form.date.data\n entry.time_spent = form.time_spent.data\n entry.learnt = form.learnt.data\n entry.resources = form.resources.data\n entry.save() # commit the changes\n flash('Entry has been updated', 'success')\n return redirect(url_for('detail', entry_id=entry.id))\n elif request.method == 'GET': # fill the form with current data\n form.title.data = entry.title\n form.date.data = entry.date\n form.time_spent.data = entry.time_spent\n form.learnt.data = entry.learnt\n form.resources.data = entry.resources\n return render_template('update.html', form=form)", "def edit(request):\n if 'form.submitted' in request.params:\n # delete old post\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', 
status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('edit_post')\n post = DBSession.query(Post).filter(Post.name==name).first()\n return environment_factory(post=post, save_url=save_url)", "def update_post(post_id):\n\n post = Post.query.get_or_404(post_id)\n post.title = request.form[\"edit_post_title\"]\n post.content = request.form[\"edit_post_content\"]\n tag_ids = [int(num) for num in request.form.getlist(\"tags\")]\n post.tags = Tag.query.filter(Tag.id.in_(tag_ids)).all()\n\n db.session.add(post)\n db.session.commit()\n return redirect(f\"/posts/{post_id}\")", "def editor_edit_post(post_id=None):\n post = Post.query.get(post_id)\n return render_template('ghostdown.html', post=post)", "def get(self, post_id):\n post = Post.by_id(int(post_id))\n\n if self.user and post.author.get_id() == self.user.get_id():\n post.content = post.content.replace('<br>', '\\n')\n self.render(\"/blog/editpost.html\", post=post)\n else:\n self.render(\"/base.html\", error=\"Not allowed to edit post.\")", "def edit_document():", "def edit(self):\n\n pass", "def edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def edit_post(bid, pid):\n # pylint: disable=unused-argument\n pst = Post.query.get(pid)\n form = PostForm(request.form)\n if request.method == 'POST' and current_user.uid == pst.uid:\n if form.validate():\n if pst.name != form.name.data or pst.text != form.desc.data:\n og_name = pst.name\n pst.name = form.name.data\n pst.text = form.desc.data\n DB.session.commit()\n flash('Post ({}) successfully edited!'.format(og_name))\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)", "def edit_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jd.Page(self.session, self.source)", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)", "def adminedit(object, id):\n\n db = get_db()\n\n if request.method == \"POST\":\n execute_string = 'UPDATE ' + object.title() + \" SET \"\n\n if object == 'post':\n execute_string += 'title = \"' + request.form['title'] + '\", content = \"' + request.form['content'] + '\", authorId = ' + request.form[\"authorid\"] + ', categoryId = ' + request.form[\"categoryid\"] + ''\n elif object == 'author':\n execute_string += 'name = \"' + request.form['name'] + '\"'\n elif object == 'category':\n execute_string += 'name = \"' + request.form['name'] + '\", description = \"' + request.form['description'] + '\"'\n\n execute_string += \" WHERE id = \" + str(id)\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n execute_string = \"SELECT * FROM \" + object.title() + \" WHERE id = \" + str(id)\n item = db.execute(execute_string).fetchone()\n\n return render_template(\"new.html\", object=object, item=item)", "def edit(id):\n r = requests.get(API_ROUTE + '/' + str(id), headers={'Auth': _auth()})\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n return render_template('editor.html', article=r.json())", "def edit(\n id: int = typer.Argument(\n ...,\n help=\"ID of the log 
entry\"\n ),\n description: str = typer.Option(\n \"\", '--description',\n help=\"New Description for the log entry\"\n ),\n date: datetime = typer.Option(\n None, '--date', '-d',\n help=\"New Date for the log entry\"\n ),\n time: datetime = typer.Option(\n None, '--time', '-t',\n formats=[\"%H:%M:%S\", \"%I:%M %p\"],\n help=\"New Time for the log entry\"\n )\n):\n log_datetime = None\n\n if date and time:\n log_entry_time = time.time()\n log_datetime = datetime.combine(date, log_entry_time)\n\n manager = LogBookManager()\n updated, message = manager.update(\n id,\n description=description,\n log_datetime=log_datetime\n )\n\n if updated:\n typer.echo(\n typer.style(message, fg=typer.colors.GREEN, bold=True)\n )\n else:\n typer.echo(\n typer.style(message, fg=typer.colors.RED, bold=True)\n )", "def edit_render(entry_id):\n\n\tquery = 'SELECT title, text, id, project FROM entries WHERE id=\"%s\"' % str(entry_id)\n\tcur = g.db.execute(query)\n\tentry = [dict(\n\t\t\ttitle=row[0], \n\t\t\ttext=row[1], \n\t\t\tid=row[2], \n\t\t\tproject=row[3]) for row in cur.fetchall()]\n\n\treturn render_template('edit_entry.html', entry=entry)", "def EditArticle(request, article_id):\n category_list = Category.objects.all().order_by('created_time')\n tag_list = Tag.objects.all().order_by('created_time')\n article = Article.objects.get(id=article_id)\n\n GetWebSiteInfo()\n dic = {'category_list':category_list, 'tag_list': tag_list, 'article': article, 'WebSiteInfo': WebSiteInfo}\n return render(request, \"blog/edit_article.html\", dic)", "def edit_name(entry):\n entry.name = get_name()\n entry.save()\n input(\"Edit successful. \")\n return entry", "def save_edit(request, post_id):\n if request.method == \"PUT\":\n data = json.loads(request.body)\n user = request.user\n post = Post.objects.get(id=post_id)\n content = data.get(\"content\", \"\")\n # Check to make sure user attempting edit is author\n if user == post.author:\n post.content = content\n post.save()\n return JsonResponse({\"content\": post.content})\n else:\n return JsonResponse({\"message\": \"Not authorized to edit\"})", "def post(self, post_id):\n post = Post.get_by_id(int(post_id), parent=blog_key())\n\n if post and self.user.key().id() == post.user.key().id():\n post.subject = self.request.get('subject')\n post.content = self.request.get('content')\n\n # initialize error to false\n have_errors = False\n\n # if there is no subject or content, it will throw errors\n if not post.subject:\n error_subject = \"Please write down the subject\"\n have_errors = True\n if not post.content:\n error_content = \"Content is required\"\n have_errors = True\n\n if have_errors:\n self.render(\"edit.html\",\n subject=post.subject,\n content=post.content,\n error_subject=error_subject,\n error_content=error_content,\n user=self.user)\n else:\n post.put()\n self.redirect('/blog/%s' % str(post.key().id()))", "def edit_post(request, year, month, day, slug):\n post = get_model_for_date_and_slug(Post, year, month, day, slug)\n form = PostForm(instance=post)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save()\n if \"continue_editing\" in request.POST:\n return http.HttpResponseRedirect(post.get_edit_url())\n return http.HttpResponseRedirect(post.get_absolute_url())\n return render_to_response(\"montgomery/edit_post.html\", {\"form\": form}, context_instance=RequestContext(request))", "def edit(ctx, docid, password):\n coll = db.get_document_collection(ctx)\n config = ctx.obj[\"config\"]\n\n doc, 
docid = db.get_document_by_id(ctx, docid)\n title = doc[\"title\"]\n\n template, c = db.get_content(ctx, doc, password=password)\n\n content, tmpfile = utils.get_content_from_editor(config[\"editor\"], template=template)\n d = datetime.datetime.now()\n\n if doc[\"encrypted\"] is True:\n title = utils.get_title_from_content(content)\n content = c.encrypt_content(content.decode(\"utf-8\").encode(\"utf-8\"))\n else:\n if not \"links\" in doc[\"categories\"]:\n title = utils.get_title_from_content(content)\n\n if isinstance(template, unicode):\n content = content.decode(\"utf-8\")\n\n if content != template:\n doc[\"content\"] = content\n doc[\"title\"] = title\n doc[\"updated\"] = d\n if validate(doc):\n coll.save(doc)\n else:\n utils.log_error(\"Validation of the updated object did not succeed\")\n\n transaction.log(ctx, docid, \"edit\", title)\n utils.log_info(\"Document \\\"%s\\\" updated.\" % title)\n else:\n utils.log_info(\"No changes detected for \\\"%s\\\"\" % title)\n\n utils.clean_tmpfile(tmpfile)\n\n return True", "def editPage(request, title):\n entry = util.get_entry(title)\n if request.method == \"POST\":\n # check if the data is valid then save/replace old data\n form = editPageForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data[\"editTitle\"]\n content = form.cleaned_data[\"editBody\"]\n\n util.save_entry(title, content)\n\n # take user to their editted page\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": title\n }))\n # give user a editting form with existing data filled in by defult. \n else:\n editForm = editPageForm(initial={\n \"editTitle\": title,\n \"editBody\": entry\n })\n editFormTitle = editForm[\"editTitle\"]\n editFormBody = editForm[\"editBody\"]\n return render(request, \"encyclopedia/editPage.html\", {\n \"formTitle\": editFormTitle,\n \"formBody\": editFormBody\n })", "def edit_post(post_id):\n\n form = forms.PostForm()\n posts = models.Post.select().where(models.Post.id == post_id)\n if posts.count() == 0:\n abort(404)\n elif form.validate_on_submit():\n models.Post.create(title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n details=form.details.data,\n remember=form.remember.data)\n models.Post.get(models.Post.id == post_id).delete_instance()\n return redirect(url_for('index'))\n return render_template('edit.html', posts=posts, form=form)", "def edit(request, pageName):\n \n if request.method == \"POST\":\n form = EditForm(request.POST)\n \n if form.is_valid(): \n content = form.cleaned_data[\"content\"]\n title = form.cleaned_data[\"title\"]\n \n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"encyclopedia:visit_entry\", args=(title, )))\n \n else:\n\n form = EditForm({'title': pageName, 'content': util.get_entry(pageName) })\n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm(),\n \"pageName\": pageName\n })\n \n \n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm({'title': pageName, 'content': util.get_entry(pageName) }),\n \"pageName\": pageName\n })", "def test_form_editing(self):\n update = {\n 'title': 'Last Post (Final)',\n 'content': '### Goodbye!',\n 'is_published': True,\n }\n\n form = self.form_cls(update, instance=self.entry)\n\n form.save()\n\n actual = models.Entry.objects.get(pk=self.entry.pk)\n self.assertEquals(actual.title, update['title'])\n self.assertEquals(actual.content.raw, update['content'])\n self.assertIsNotNone(actual.published_timestamp)", "def edit_link(db_object, text=None):\n 
if text is None:\n text = 'edit'\n return _make_link(db_object.update_url(), text)", "def db_entry_edit(db_in, table, entry_id, dict_in):\n connection = db_in.connection.cursor()\n\n # Update entries for each key and value.\n for key, value in dict_in.items():\n # Attempt to add column, fail silently if it exists.\n try:\n connection.execute('ALTER TABLE %s ADD COLUMN %s' % (table.name, key.replace(\"'\", \"''\")))\n except sqlite3.OperationalError:\n pass\n # Update the entry in the database.\n connection.execute(\"UPDATE '%s' SET %s='%s' WHERE id=%s;\" % (table.name, key, value, str(entry_id)))\n\n db_in.connection.commit()", "def deletePost(self, editLink, entryId): #$NON-NLS-1$\r\n\r\n deleteAtomEntry = self.createDeleteBlogEntry()\r\n deleteAtomEntry.setId(entryId)\r\n return self.deleteAtomEntry(editLink, deleteAtomEntry)", "def update_article(article_id):\n \n article = mongo.db.articles\n article.find_one_and_update({'_id': ObjectId(article_id) },\n {'$set':\n {'title': request.form.get('title'),\n 'content': request.form.get('content')\n }\n })\n flash('Your post has been updated.', 'success')\n return redirect(url_for('blog'))", "def modify_feed(request, feed_id):\n\n __time_update(request.user)\n\n try:\n feed = Feed.objects.get(id=feed_id, user=request.user)\n except KeyError:\n return render_to_response('message.html',\n {'message': 'There is no such feed.',\n 'back': '/feeds'})\n\n # Try to get data from form & update feed\n try:\n title = request.POST['title']\n url = request.POST['url']\n\n feed.title = title\n feed.url = url\n\n feed.save()\n return redirect('/feeds')\n # Or display new form, filled with current feed values\n except KeyError:\n return render_to_response('modify_feed.html',\n {'feed': feed,\n 'username': request.user.username})", "def view_blog(self):", "def test_editing_post(self):\n\n form_data = {\"meal-time\": \"2020-02-25 08:00:00\", \n \"meal-setting\": \"At home!\", \"TEB\": \"Some thoughts..\",\n \"hunger\": 2, \"fullness\": 3, \"meal-notes\": \"Some notes.\"}\n\n edit_post(1, \"/static/images/uploads/2.jpg\", form_data)\n\n post = Post.query.get(1)\n \n self.assertEqual(post.meal_setting, \"At home!\")\n self.assertEqual(post.satisfaction, None)\n self.assertNotEqual(post.fullness, 8)", "def get_view(post_id):\n # create DB connection\n db_connection = sqlite3.connect(DB_FILE)\n db_cursor = db_connection.cursor()\n post_info = get_post_info(db_cursor, post_id)\n # close DB connection\n db_connection.close()\n page_title = \"Blog Manager | Edit Post\"\n styles = [ '/static/stylesheets/blog/edit_post_style.css' ]\n scripts = [ '/static/scripts/jquery.js', '/static/blog/scripts/edit_post_scripts.js' ]\n return render_template(\"blog/edit_post.html\", page_title=page_title, styles=styles, scripts=scripts, post_info=post_info)", "def blogAdmin(request, urlname):\n\tblog = Blog.objects.get(authors=request.user, urlname=urlname)\n\teditor = request.user in blog.editors.all()\n\tposts = BlogEntry.qa_objects.filter(blog=blog).order_by('-posting_time')\n\treturn render_to_response('blogs/admin/blog_admin.html', {'user': request.user, 'editor': editor, 'blog': blog, 'posts': posts}, context_instance=RequestContext(request))", "def show_post_edit(post_id):\n\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def _updateFeed(pk):\n feed = get_object_or_404(Feed, pk=pk)\n\n rawFeed, entries = feed._fetch_feed() \n\n feed.title = rawFeed.get('title', None)\n feed.subtitle 
= rawFeed.get('subtitle', None)\n feed.copyright = rawFeed.get('rights', None)\n feed.ttl = rawFeed.get('ttl', None)\n feed.atomLogo = rawFeed.get('logo', None)\n\n # Try to find the updated time\n updated = rawFeed.get(\n 'updated_parsed',\n rawFeed.get('published_parsed', None),\n )\n\n if updated:\n updated = datetime.datetime.fromtimestamp(\n time.mktime(updated)\n )\n\n feed.pubdate = updated\n\n super(Feed, feed).save()\n\n if entries:\n dbEntriesCreate = []\n dbEntriesupdate = []\n for raw_entry in entries:\n entry = Entry.objects.parseFromFeed(raw_entry)\n entry.feed = feed\n\n try:\n newEntry = Entry.objects.get(guid=entry.guid, feed=feed)\n except:\n newEntry = None\n\n \n if newEntry:\n # if it was updated, then mark it as unread, otherwise no need to do anything\n if newEntry.date > entry.date:\n entry.state = ENTRY_UNREAD\n id = newEntry.id\n newEntry = entry\n newEntry.id = id\n dbEntriesupdate.append(newEntry)\n else:\n dbEntriesCreate.append(entry)\n\n with transaction.atomic():\n if len(dbEntriesCreate)>0:\n Entry.objects.bulk_create(dbEntriesCreate)\n if len(dbEntriesupdate)>0:\n fields = ['feed', 'state', 'title' , 'content', 'date', 'author', 'url' ,'comments_url']\n Entry.objects.bulk_update(dbEntriesupdate, fields)\n\n return", "def change_post(mfp_form, request):\n article_id = int(mfp_form.cleaned_data['id'])\n newpost = APost(article_id)\n if newpost.exist:\n newpost.article.title = mfp_form.cleaned_data['title']\n # 缩略名\n newpost.article.short_title = mfp_form.cleaned_data['short_title']\n newpost.article.cover = mfp_form.cleaned_data['cover_url']\n newpost.article.introduction = mfp_form.cleaned_data['introduction']\n newpost.article.content = mfp_form.cleaned_data['content']\n # id为2是已发布的文章,默认为已发布,后面再改\n newpost.article.status = Status.objects.get(id=2)\n tagids = mfp_form.cleaned_data['tags']\n if len(tagids) != 0:\n for tagid in tagids:\n tagid = int(tagid)\n tag = Tags.objects.get(id=tagid)\n newpost.article.tags.add(tag)\n threadtypeid = int(mfp_form.cleaned_data['threadtypeid'])\n newpost.article.threadtypeid = ThreadTypes.objects.get(id=threadtypeid)\n if mfp_form.cleaned_data['commentnotshow'] != '':\n newpost.article.comment_status = False\n else:\n newpost.article.comment_status = True\n return newpost.article\n else:\n return False", "def edit_task_name(entry):\n entry.task_name = get_task_name()\n entry.save()\n input(\"Edit successful. 
\")\n return entry", "def update(id):\r\n post = get_post(id)\r\n db = get_db()\r\n cur = db.cursor()\r\n\r\n if request.method == 'POST':\r\n title = request.form['title']\r\n body = request.form['body']\r\n error = None\r\n\r\n cur.execute('SELECT id FROM novel.post WHERE title = %s', title)\r\n newId = cur.fetchone()\r\n\r\n\r\n\r\n if not title:\r\n error = 'Title is required.'\r\n\r\n if newId and newId['id'] != id:\r\n error = 'Title is repeated.'\r\n\r\n if error is not None:\r\n flash(error)\r\n else:\r\n\r\n cur.execute(\r\n 'UPDATE novel.post SET title = \"{0}\", body = \"{1}\" WHERE id = {2}'\r\n .format(title, body, id)\r\n )\r\n db.commit()\r\n return redirect(url_for('novel.index'))\r\n\r\n return render_template('novel/update.html', post=post)", "def update(request):\n paste = Paste.get(request.matchdict['idContent'])\n\n password = _buildPassword(paste.username, paste.created, request.POST['password'])\n\n if password == paste.password:\n paste.title = request.POST['title']\n paste.content = request.POST['content']\n\n paste.save()\n\n request.session.flash(u\"Updated\") # TODO translatoion\n\n return HTTPFound(request.route_path('oneContent', idContent=paste._id))\n\n request.session.flash(u\"Wrong password\") # TODO translatoion\n\n return HTTPFound(request.route_path('edit', idContent=paste._id))", "def add_edited_post_to_db(post_id):\n post = Post.query.get_or_404(post_id)\n\n post.title=request.form['title']\n post.content=request.form['content']\n\n db.session.add(post)\n db.session.commit()\n\n flash(f'Post \"{post.title}\" was successfully edited')\n\n return redirect(f'/posts/{post_id}')", "def community_post_update_view(request, slug):\n task = \"Update\"\n post = CommunityPostModel.objects.get(slug=slug) # Get the post\n\n form = AddEditPostForm(instance=post) # An unbound form\n if request.method == 'POST': # If the form has been submitted...\n form = AddEditPostForm(request.POST, request.FILES, instance=post) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n post = form.save() # Save the object to the database\n slug_str = \"%s %s\" % (post.title, post.date_posted) # Create a slug from the title and date\n post.slug = slugify(slug_str) # Create the slug\n post.save() # Save the object to the database\n return redirect('community-post-detail', slug=post.slug) # Redirect to the detail page\n\n context = { # Pass the variables to the template\n 'task': task,\n 'post': post,\n 'form': form,\n }\n return render(request,\n 'pages/patient-community/community-create-update-post.html',\n context) # render the patient community update page", "def edit():\n database.ask(mode='single')\n F = database.check(single=True)\n if F and hasattr(F,'edit'):\n name = database[0]\n F.edit(name)", "def add_blog(self, text):\n self.blog.add_blog(text)\n self.refresh()", "def blog_view(request):\n blog_id = int(request.matchdict.get('id', -1))\n entry = BlogRecordService.by_id(blog_id, request)\n if not entry:\n return HTTPNotFound\n return {'entry': entry}", "def test_blogpost_update_by_anonymous(self):\r\n user = self.create_users()[1]\r\n app = self.create_app(info=None)\r\n app.owner = user\r\n blogpost = Blogpost(owner=user, app=app, title='thisisatitle', body='body')\r\n db.session.add_all([user, app, blogpost])\r\n db.session.commit()\r\n url = \"/app/%s/%s/update\" % (app.short_name, blogpost.id)\r\n\r\n res = self.app.get(url, follow_redirects=True)\r\n assert res.status_code == 200, res.status_code\r\n assert \"Please sign in to access this 
page\" in res.data, res.data\r\n\r\n res = self.app.post(url,\r\n data={'id':blogpost.id,\r\n 'title':'new title',\r\n 'body':'new body'},\r\n follow_redirects=True)\r\n assert res.status_code == 200, res.status_code\r\n assert \"Please sign in to access this page\" in res.data\r\n\r\n blogpost = db.session.query(Blogpost).first()\r\n assert blogpost.title == 'thisisatitle', blogpost.title", "def createNewBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createNewEntryDocument()\r\n self._initNewEntryDocument(atomdoc)\r\n return ZAtomNewBlogEntry(atomdoc)", "def edit_topic():\n topic = db.topic(request.args(0))\n form = SQLFORM(db.topic, record=topic)\n form.vars.description = text_store_read(topic.description)\n if form.validate():\n topic.update_record(\n name=form.vars.name,\n )\n text_store_write(form.vars.description, key=topic.description)\n session.flash = T('The topic has been created')\n redirect(URL('default', 'index'))\n return dict(form=form)", "def edit_existing_thread(r: praw.Reddit, thread_id: str, text: str) -> None:\n r.validate_on_submit = True\n r.submission(thread_id).edit(text)\n logging.info('Updated weekly discussion thread')", "def getBlogEntry(self, id):\n return BlogEntry.create(self.pm_getSpaceManager().getBlogEntry(self._unbox(id)),self._modelDataManager)", "def edit_post_process(post_id):\n\n # extract form data, commit, then redirect to /users\n f_title = request.form[\"post-title\"].strip()\n f_content = request.form[\"post-content\"].strip()\n\n # msg will also include a field for the user_id for routing.\n msg = db_edit_post(post_id, f_title, f_content)\n\n flash(msg[\"text\"], msg[\"severity\"])\n\n return redirect(f\"/users/{msg['user_id']}\")", "def edit(self, new_content: str) -> None:\n\n # YOUR CODE HERE\n self.content = new_content", "def show_edit_post(post_id):\r\n post = Post.query.get_or_404(post_id)\r\n tags = Tag.query.all()\r\n return render_template('edit-post.html', post=post, tags=tags)", "def create_entry():\n new_entry = DB_Entry() # Create instance of entry to add the info to\n print('Eratosthenes is ready to add your new entry.\\n')\n new_entry.set_id()\n title = input('Enter the title:\\n')\n new_entry.set_title(title)\n authors = input('Enter the authors as list of surname, firstname separated by semicolons:\\n')\n new_entry.set_authors(authors)\n try:\n year = int(input('Enter the year:\\n'))\n except ValueError:\n try:\n year = int(input('Enter the year as an integer:\\n'))\n except ValueError:\n print('You failed to follow basic instructions. The year is set to 2000\\n')\n year = 2000\n new_entry.set_year(year)\n pub_type = input('Enter the publication type as article/review/book/other:\\n')\n try:\n new_entry.set_type(pub_type)\n except ValueError:\n try:\n pub_type = input('Type must be one of article/review/book/other:\\n')\n new_entry.set_type(pub_type)\n except ValueError:\n print('You failed to follow basic instructions. Type is now set to \\'other\\'\\n')\n pub_type = 'other'\n new_entry.set_type(pub_type)\n keywords = input('Enter list of keywords separated by semicolons:\\n')\n new_entry.set_keywords(keywords.split(';'))\n current_path = input('Enter the current path to the file\\n')\n current_path = current_path.replace('~', '/Users/marcus')\n if not os.path.isfile(current_path):\n print('File not found. 
Please try again')\n current_path = input('Enter the current path to the file\\n')\n if not os.path.isfile(current_path):\n print('File not found')\n new_entry.set_new_path()\n db_actions.copy_file(new_entry.get_path(), current_path)\n return new_entry", "def new(request):\n\n if request.method == 'POST':\n data = request.POST\n form = BlogEntryForm(creator=request.user, data=data)\n if form.is_valid():\n form.save()\n # Should we redirect to single entry view or to all?\n return HttpResponseRedirect(reverse('blog.list_all'))\n else:\n form = BlogEntryForm(creator=request.user)\n\n data = {'form': form, 'blog_info': get_blog_info()}\n data.update(csrf(request))\n return render_to_response('blog/new_blog.html', data,\n context_instance=get_rq(request))", "def edit_entry(table_id):\n\n print(\"\\nWould you simply like to simply\")\n edit_quest = input(\"[D]elete the record, or [E]dit it? \").upper()\n if edit_quest == 'D':\n Entry.get(Entry.id == table_id).delete_instance()\n clear()\n input('Entry has been deleted.\\nPress ENTER to Continue. ')\n return main()\n else:\n clear()\n print(\"Do you wish to change the DATE of the task?\")\n date_quest = input(\"[y/N] \").upper().strip()\n if date_quest == 'Y':\n while True:\n clear()\n print(\"Enter your task's new DATE using\")\n edited_date = input(\"[YYYY-MM-DD]: \").strip()\n \n try:\n task_dt = datetime.datetime.strptime(edited_date,\n '%Y-%m-%d')\n except ValueError:\n clear()\n input(\"The format provided was not correct. Try Again \")\n else:\n Entry.update(date=task_dt).where(\n Entry.id ==\n table_id).execute()\n break\n\n clear()\n print(\"Do you wish to change the NAME of the task?\")\n name_quest = input(\"[y/N] \").upper()\n if name_quest == 'Y':\n clear()\n edited_name = input('Enter your new task name: ')\n Entry.update(task=edited_name).where(\n Entry.id == table_id).execute()\n\n clear()\n print(\"Do you wish to change the NUMBER\")\n print(\"OF MINUTES TO COMPLETE the task?\")\n minutes_quest = input(\"[y/N] \").upper().strip()\n if minutes_quest == 'Y':\n while True:\n try:\n clear()\n print(\"Enter the new number of minutes for your task\")\n edited_minutes = int(input(\" (integers only): \"))\n except ValueError:\n clear()\n input(\"The format provided was not correct. 
Try Again \")\n else:\n Entry.update(time=edited_minutes).where(\n Entry.id == table_id).execute()\n break\n\n clear()\n print(\"Would you like to edit your NOTE from this task?\")\n note_quest = input(\"[y/N] \").upper().strip()\n if note_quest == 'Y':\n clear()\n edited_note = input('Enter your new note: ')\n Entry.update(note=edited_note).where(\n Entry.id == table_id).execute()\n return main()", "def update(src, des, tipe):\n from xbooks.Xinit import Xrc\n src = \"Xblog/docs/\" + src\n des = \"Xblog/docs/\" + des\n if \"Xblog/docs/notebooks/\" == des.replace(os.path.basename(des), \"\"):\n editNavBar(src, des, tipe, Xrc)\n else:\n editParentIndex(src, des, tipe, Xrc)\n ccc.success(\"updatation procedures for \" + des)", "def edit_textpost(request, textpost_id):\n\n textpost = TextPost.objects.get(id=textpost_id)\n textpost_age = get_submission_age(textpost)\n\n # Redirect unauthenticated users to register/ login.\n if not request.user.is_authenticated():\n return redirect('login')\n\n if request.method == 'POST':\n\n edit_textpost_form = TextPostForm(data=request.POST, instance=textpost)\n\n if edit_textpost_form.is_valid():\n # Make sure user can still edit,\n # and window has not passed since form displayed.\n if not can_edit_textpost(textpost, request):\n return redirect('/discuss/%s/' % textpost.id)\n\n edited_textpost = edit_textpost_form.save(commit=False)\n textpost.post_body = edited_textpost.post_body\n textpost.title = edited_textpost.title\n textpost.save()\n\n # Invalidate caches: This affects the discussion page for the textpost.\n # If title changed, also affects /index and /new caches.\n invalidate_caches('ed_news', 'index', 'new')\n invalidate_cache('discuss', (textpost.id, ), namespace='ed_news')\n\n # Redirect to discussion page.\n return redirect('/discuss/%s/' % textpost.id)\n\n else:\n # Invalid form/s.\n # Print errors to console; should log these?\n print 'ae', edit_textpost_form.errors\n\n else:\n # Send blank forms.\n edit_textpost_form = TextPostForm(instance=textpost)\n\n return render_to_response('ed_news/edit_textpost.html',\n {'edit_textpost_form': edit_textpost_form,\n 'textpost_id': textpost.id,\n },\n context_instance = RequestContext(request))", "def edit(self, new_content: object, reason: str = \"\") -> None:\n raise NotImplementedError", "def post(self):\n comment_id = self.request.get('comment_id')\n post_id = self.request.get('post_id')\n comment = Comment.get_by_id(int(comment_id), parent=comment_key())\n post = Post.get_by_id(int(post_id), parent=blog_key())\n if comment and self.user.key().id() == comment.user.key().id():\n comment.content = self.request.get('content')\n\n have_errors = False\n\n if not comment.content:\n error_content = \"Content is required\"\n have_errors = True\n\n if have_errors:\n self.render(\"edit_comment.html\",\n comment=comment,\n error_content=error_content,\n user=self.user)\n else:\n comment.put()\n time.sleep(0.1)\n\n self.redirect('/blog/%s' % str(post.key().id()))", "def editor_save():\n markdown = request.form.get('markdown')\n html = request.form.get('html')\n title = request.form.get('title')\n if 'post_id' in request.form:\n post_id = int(request.form.get('post_id'))\n edit_post = Post.query.get(post_id)\n edit_post.markdown = markdown\n edit_post.html = html\n edit_post.title = title\n db.session.add(edit_post)\n db.session.commit()\n return jsonify(saved_success=True, new_post=None, post_id=None)\n else:\n new_post = Post(markdown=markdown, html=html, title=title);\n db.session.add(new_post)\n 
db.session.commit()\n return jsonify(saved_success=True, new_post=True, post_id=new_post.id)", "def edit(request, article_id):\n try:\n article = Article.objects.get(pk=article_id)\n except Article.DoesNotExist:\n raise Http404(\"Article does not exist\")\n if request.method == 'POST': # フォームが提出された\n form = ArticleForm(request.POST, instance = article) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n article = form.save(commit=False)\n if form.cleaned_data['no_expired_at'] is True:\n article.expired_at = None\n article.save()\n return HttpResponseRedirect(reverse('article_list')) # POST 後のリダイレクト\n else:\n no_expired_at = False\n if article.expired_at is None:\n no_expired_at = True\n article.expired_at = datetime.now() + timedelta(days=1)\n form = ArticleForm(instance = article, initial = {'no_expired_at': no_expired_at, }) # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n auth_form = AuthenticationForm(None, request.POST or None)\n return render(request, 'app/article_edit.html', { \n 'form': form,\n 'title':'ニュース記事の編集',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'更新する',\n 'article_pk':article.pk,\n 'auth_form':auth_form,\n 'current_user':request.user,\n })", "def show_edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('edit-post.html', post=post)", "def edit_announcement():\n # Implement me!\n\n announcement = get_announcement(request.vars.announcement_id, auth.user.email)\n\n announcement.description = request.vars.description\n announcement.name = request.vars.name\n announcement.updated_on = datetime.datetime.utcnow()\n announcement.update_record()\n return response.json(announcement)", "def edit_html():\n # save the new html_raw\n entry = request.form.get(\"entry\")\n entry_dict[entry]['html_raw'] = request.form.get(\"raw\")\n entry_dict[entry]['html'] = raw_to_html(entry, entry_dict[entry]['html_raw'])\n # save into pkl file\n with open(entry_dict_path, 'wb') as pkl_file:\n cPickle.dump(entry_dict, pkl_file)\n # update the website\n return jsonify(True)", "def post_update():\n\n\n user_id = session['user_id']\n post = request.form.get('post')\n\n Update.add_update(user_id, post)\n\n return \"Updated Post\"", "def edit_user(user_id):\n\n db_user = User.query.get_or_404(user_id)\n\n return render_template(\"edit_user.html\",\n headline=f\"Edit Blogly {db_user.get_full_name()}\",\n user=db_user)", "def edit_record(self, record):\r\n self.record.editObject(record, id=record['id'])" ]
[ "0.7704336", "0.76225203", "0.7564468", "0.7443002", "0.73097694", "0.72731054", "0.7030135", "0.7011261", "0.6999224", "0.6944325", "0.6901991", "0.6855571", "0.68527025", "0.6852159", "0.68337286", "0.67222476", "0.6703577", "0.66673446", "0.6648436", "0.66032463", "0.6591359", "0.6564939", "0.6548648", "0.6533431", "0.6495092", "0.6471168", "0.64411056", "0.643052", "0.6414917", "0.64045984", "0.6382693", "0.63809735", "0.6374838", "0.63746387", "0.6334431", "0.6313694", "0.6308859", "0.6281634", "0.6274331", "0.6273166", "0.6259134", "0.6257069", "0.6249478", "0.6228274", "0.622793", "0.6219292", "0.6199702", "0.6185204", "0.6184606", "0.61449397", "0.61442304", "0.6138899", "0.61272806", "0.6106933", "0.6092412", "0.6059118", "0.6022481", "0.6016102", "0.60129774", "0.6010035", "0.60003686", "0.599514", "0.59736806", "0.5968246", "0.596164", "0.595375", "0.5942383", "0.5938929", "0.59360856", "0.59354526", "0.5926668", "0.5905763", "0.59015733", "0.58978325", "0.58935684", "0.5885452", "0.5873348", "0.5870471", "0.5853094", "0.5845868", "0.58431983", "0.5832471", "0.5831475", "0.5818843", "0.5813135", "0.5800296", "0.57944745", "0.5787919", "0.578093", "0.5774652", "0.5767038", "0.5764922", "0.57635754", "0.5762239", "0.5746403", "0.5744313", "0.57377845", "0.5733775", "0.5732203", "0.5728268" ]
0.72877353
5
Context manager to temporarily change to a new directory.
def chdir(new_dir):
    cur_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(cur_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def use_dir(new_dir):\n owd = os.getcwd()\n os.chdir(new_dir)\n\n try:\n yield\n finally:\n os.chdir(owd)", "def cd_manager(self, new_wd):\n old_wd = self.cwd\n self.cd(new_wd)\n yield self.cwd\n self.cd(old_wd)", "def __enter__(self):\n self.savedPath = os.getcwd()\n os.chdir(self.newPath)", "def chdir(new_dir):\n cur_dir = os.getcwd()\n # This is weird behavior. I'm removing and and we'll see if anything breaks.\n #safe_makedir(new_dir)\n os.chdir(new_dir)\n try:\n yield\n finally:\n os.chdir(cur_dir)", "def chdir(new_dir):\n cur_dir = os.getcwd()\n # FIXME: currently assuming directory exists\n safe_makedir(new_dir)\n os.chdir(new_dir)\n try:\n yield\n finally:\n os.chdir(cur_dir)", "def change_dir(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)", "def cd(newdir):\n prevdir = os.getcwd()\n os.chdir(os.path.expanduser(newdir))\n try:\n yield\n finally:\n os.chdir(prevdir)", "def change_dir_without_context_manager(filename1, filename2):", "def fresh_directory():\n os.chdir(tempfile.mkdtemp())", "def pushd(cls, new_dir):\n previous_dir = os.getcwd()\n try:\n new_ab_dir = None\n if os.path.isabs(new_dir):\n new_ab_dir = new_dir\n else:\n new_ab_dir = os.path.join(previous_dir, new_dir)\n # Use absolute path to show it on FileNotFoundError message.\n cls.cd(new_ab_dir)\n yield\n finally:\n cls.cd(previous_dir)", "def cd(dir):\n old = os.getcwd()\n os.chdir(dir)\n yield\n os.chdir(old)", "def dir_context(directory):\n curdir=os.getcwd()\n os.chdir(directory)\n try:\n yield directory\n finally:\n os.chdir(curdir)", "def cd(new_directory=None):\n if new_directory is None:\n new_directory = \".\"\n previous_directory = os.getcwd()\n if not os.path.isdir(new_directory):\n os.mkdir(new_directory)\n new_directory = os.path.expanduser(new_directory)\n os.chdir(new_directory)\n try:\n yield\n finally:\n os.chdir(previous_directory)", "def excursion(directory):\n old_dir = os.getcwd()\n try:\n os.chdir(directory)\n yield\n finally:\n os.chdir(old_dir)", "def changeDirectory(self, directory):\n self._cwd = directory", "def ChangeDir(self, path: str) -> None:\n ...", "def change_dir(path): \r\n os.chdir(path)", "def change_working_directory(directory=None):\n if directory:\n try:\n saved_directory = os.getcwd()\n logger.info(\"Changing to shadow directory: {}\".format(directory))\n os.chdir(directory)\n yield\n finally:\n os.chdir(saved_directory)\n else:\n yield", "def change_working_directory(path):\n prev_cwd = os.getcwd()\n os.chdir(path)\n try:\n yield os.getcwd()\n finally:\n os.chdir(prev_cwd)", "def change_directory(path):\n os.chdir(path)", "def changeDirectory( self, directory ):\n if directory[0] == '/':\n directory = directory.lstrip( '/' )\n self.cwd = '%s/%s' % ( self.cwd, directory )", "def usedir(dir):\n curr = os.getcwd()\n os.chdir(dir)\n try:\n yield\n finally:\n os.chdir(curr)", "def cd(directory):\n old = os.getcwd()\n try:\n os.chdir(directory)\n yield\n finally:\n os.chdir(old)", "def inside_dir(dirpath):\n old_path = os.getcwd()\n try:\n os.chdir(dirpath)\n yield\n finally:\n os.chdir(old_path)", "def working_directory(path):\n prev_cwd = os.getcwd()\n os.chdir(path)\n yield\n os.chdir(prev_cwd)", "def as_cwd(self):\n old = self.chdir()\n try:\n yield old\n finally:\n if old is not None:\n old.chdir()", "def _remember_cwd():\r\n curdir = os.getcwd()\r\n try:\r\n yield\r\n finally:\r\n os.chdir(curdir)", "def change_dir(destination):\n cwd = os.getcwd()\n try:\n os.chdir(destination)\n yield\n finally:\n 
os.chdir(cwd)", "def working_dir(new_dir):\n cwd = os.getcwd()\n if cwd == \"\":\n # edge case observed in some unittests\n cwd = os.environ['PWD']\n try:\n os.chdir(new_dir)\n yield new_dir\n finally:\n os.chdir(cwd)", "def cd(dirname):\n original = os.getcwd()\n os.chdir(dirname)\n try:\n yield\n finally:\n os.chdir(original)", "def tmp_chdir(path):\n prev_cwd = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(prev_cwd)", "def pushd(directory):\n prevdir = os.getcwd()\n os.chdir(directory)\n try:\n yield prevdir\n finally:\n os.chdir(prevdir)", "def chdir(self, directory):\n self.eval(\"cd('{0}')\".format(directory))", "def cd(path):\n oldPath = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(oldPath)", "def cd(path):\n old_path = os.getcwd()\n yield os.chdir(path)\n os.chdir(old_path)", "def cd(path):\n old_dir = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(old_dir)", "def __enter__(self):\n runLog.debug(\"Changing directory to {}\".format(self.destination))\n self.moveFiles()\n self.open()\n return self", "def chdir(tmpdir):\n tmpdir.chdir()", "def chdir(self):\r\n self.directory=tkf.askdirectory()", "def exec_from_dirname(self):\n original = os.path.realpath(os.curdir)\n os.chdir(self.dirname)\n try:\n yield\n finally:\n os.chdir(original)", "def chdir(self):\n try:\n old = self.__class__()\n except error.ENOENT:\n old = None\n error.checked_call(os.chdir, self.strpath)\n return old", "def change_dir(self):\n self.working_dir = self.state_frame[0]\n self.state = STATE_READ_LINE", "def withDirectoryChange(path, allow_none=False):\n\n # spellchecker: ignore chdir\n\n if path is not None or not allow_none:\n old_cwd = os.getcwd()\n os.chdir(path)\n\n yield\n\n if path is not None or not allow_none:\n os.chdir(old_cwd)", "def chdir(dir):\n orig_cwd = os.getcwd()\n os.chdir(dir)\n try:\n yield\n finally:\n os.chdir(orig_cwd)", "def remember_cwd():\n curdir= os.getcwd()\n try: yield\n finally: os.chdir(curdir)", "def chdir(where):\n from twill import commands\n \n cwd = os.getcwd()\n _dirstack.append(cwd)\n print(cwd)\n\n os.chdir(where)\n print('changed directory to \"%s\"' % (where,), file=commands.OUT)\n\n commands.setglobal('__dir__', where)", "def working_directory(path):\n prev_cwd = Path.cwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(prev_cwd)", "def changeCWD(self):\n new_dir = QtWidgets.QFileDialog.getExistingDirectory(self, \"New working directory\", os.getcwd())\n\n logging.debug(\"Changing directory: '%s'\", new_dir)\n\n if new_dir and os.path.isdir(new_dir):\n os.chdir(new_dir)\n self.updateCWD()", "def tempdir():\n\n class ctx(object):\n\n def __enter__(self):\n self.prevdir = os.getcwd()\n self.tempdir = tempfile.mkdtemp()\n os.chdir(self.tempdir)\n return self.tempdir\n\n def __exit__(self, *a, **kwa):\n os.chdir(self.prevdir)\n shutil.rmtree(self.tempdir)\n\n return ctx()", "def _processNewDirectory(self, dirpath):\n self._parent.processDirectory(dirpath)", "def cd(self, directory):\n return ChangeDir(directory)", "def working_dir(path):\n starting_path = os.getcwd()\n os.chdir(path)\n yield\n os.chdir(starting_path)", "def change_dir(filename):", "def chdir(self, path):\n # temporarily join the specified directory to see if we have\n # permissions to do so\n basedir = os.getcwd()\n try:\n os.chdir(path)\n except os.error:\n raise\n else:\n os.chdir(basedir)\n self.cwd = self.fs2ftp(path)", "def chdir_in_and_out(request, path):\n oldWorkDirStr = str(local.cwd)\n workDir = local.cwd\n workDir.chdir(path)\n 
request.addfinalizer(lambda: workDir.chdir(oldWorkDirStr))\n return type(\"\", (), {\"oldWorkDirStr\": oldWorkDirStr})", "def change_cwd(path):\n saved_dir = os.getcwd()\n os.chdir(path)\n try:\n yield os.getcwd()\n finally:\n os.chdir(saved_dir)", "def test_change_dir(test_output_dirs: OutputFolderForTests) -> None:\n os.chdir(test_output_dirs.root_dir)\n assert Path.cwd() == test_output_dirs.root_dir\n new_dir = test_output_dirs.root_dir / \"foo\"\n new_dir.mkdir()\n with change_working_directory(new_dir):\n assert Path.cwd() == new_dir\n Path(\"bar.txt\").touch()\n assert Path.cwd() == test_output_dirs.root_dir\n assert (new_dir / \"bar.txt\").is_file()", "def f_chdir(my_dir):\n if not os.path.isdir(my_dir):\n os.mkdir(my_dir)\n os.chdir(my_dir)", "def cd(dirname):\n _curdir = getcwd()\n chdir(dirname)\n yield\n chdir(_curdir)", "def chdir_tmp(self):\n dirname = make_tempdir()\n os.chdir(dirname)\n\n return dirname", "def chdir_tmp(self):\n dirname = make_tempdir()\n os.chdir(dirname)\n\n return dirname", "def resetWorkingDirectory( self ):\n self.cwd = self.path", "def generator_start_dir() -> str:\n old_cwd = os.getcwd()\n newpath = tempfile.mkdtemp()\n os.chdir(newpath)\n try:\n yield newpath\n finally:\n os.chdir(old_cwd)\n shutil.rmtree(newpath, ignore_errors=True)", "def working_directory(dir):\n cwd = os.getcwd()\n os.chdir(dir)\n try:\n yield\n finally:\n os.chdir(cwd)", "def working_directory(dir):\n cwd = os.getcwd()\n os.chdir(dir)\n try:\n yield\n finally:\n os.chdir(cwd)", "def saved_cwd():\n saved = os.getcwd()\n try:\n yield\n finally:\n os.chdir(saved)", "def pushcwd(iv=None):\n\tcwd = os.getcwd()\n\ttry:\n\t\tdirname = None;\n\t\tif(isinstance(iv, str)):\n\t\t\tif(os.path.exists(iv)):\n\t\t\t\tif(os.path.isdir(iv)):\n\t\t\t\t\tdirname = iv;\n\t\t\t\telse:\n\t\t\t\t\tdirname = os.path.dirname(iv);\n\n\t\tif dirname is not None:\n\t\t\tos.chdir(dirname)\n\t\tyield\n\tfinally:\n\t\tos.chdir(cwd);", "def cd(self, path, remember=True): # pylint: disable=invalid-name\n self.enter_dir(path, remember=remember)", "def changeDirectory(self, directory):\n self.pushMode(CLI_MODES.shell)\n output = self.sendCmd(\"cd %s\" % directory)\n self.popMode()\n if \"No such file or directory\" in output:\n logger.error (\"No such file or directory exist : %s\" %directory)\n return output", "def __exit__(self, etype, value, traceback):\n os.chdir(self.savedPath)", "def cd(path):\n\n cwd = os.getcwd()\n\n try:\n os.chdir(path)\n except OSError:\n raise RuntimeError('Can not change directory to {}'.format(path))\n\n try:\n yield\n except Exception:\n logger.error(\n 'Exception caught: {}'.format(' - '.join(sys.exc_info()[:2]))\n )\n raise RuntimeError('Failed code in new directory {}'.format(path))\n finally:\n os.chdir(cwd)", "def cd(path):\n if not path:\n yield\n return\n curdir = path_utils.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(curdir)", "def restore_cwd():\n cwd = os.getcwd()\n try:\n yield\n finally:\n os.chdir(cwd)", "def cd(self, path):\n with self.lock:\n if path == '-':\n path = self.pwd\n\n path = pathlib.Path(path)\n if not path.is_absolute():\n new_cwd = (self.cwd / path).resolve()\n else:\n new_cwd = path.resolve()\n\n if not new_cwd.is_dir():\n raise RuntimeError(f\"{new_cwd!s} is not a directory\")\n\n self.pwd = self.cwd\n self.cwd = new_cwd", "def editdirectory(self):\n\n ## Have user select existing directory\n new_directory = str(QtGui.QFileDialog.getExistingDirectory(self, \"Select Directory\",\n '/home/lsst/Data/'))\n\n ## If return is not NULL, 
set the DATA_DIRECTORY and update filename\n if new_directory:\n\n try:\n os.makedirs(new_directory)\n except OSError:\n if not os.path.isdir(new_directory):\n self.logger.exception(\"An error occurred while creating a new directory.\")\n\n global DATA_DIRECTORY\n DATA_DIRECTORY = new_directory\n self.displaydirectory()\n self.logger.info(\"Data directory changed to {0}.\".format(new_directory))", "def __enter__(self):\n dirpath_bytes = tempfile.mkdtemp()\n self.dirpath = str(dirpath_bytes.replace('\\\\', '\\\\\\\\'))\n return self.dirpath", "def work_in(dirname=None):\n curdir = os.getcwd()\n try:\n if dirname is not None:\n os.chdir(dirname)\n yield\n finally:\n os.chdir(curdir)", "def cd(cls, directory):\n Log.debug('CMD: cd {0}'.format(directory))\n os.chdir(directory)", "def change_to_current_path(to_change_path):\n os.chdir(to_change_path)", "def update_dir(self, new_dir):\n self.save_loc.setText(new_dir)", "def cd_tmp_dir(tmp_path):\n os.chdir(tmp_path)", "def tmp_dir(monkeypatch):\n try:\n tmp_dir = tempfile.mkdtemp()\n yield tmp_dir\n finally:\n # tmp_dir を削除するためにカレントディレクトリを移動\n monkeypatch.chdir(os.path.dirname(tmp_dir))\n shutil.rmtree(tmp_dir)", "def _restore_orig_directory(self):\n if not self._is_temp_dir:\n return\n self._base_data_dir = self._orig_base_data_dir\n del self._orig_base_data_dir\n self._base_logs_dir = self._orig_base_logs_dir\n del self._orig_base_logs_dir\n self.db.change_path(self._base_data_dir / \"projects.db\")\n self.set_current(\"default\", update=False)\n self._is_temp_dir = False", "def temporary(self, path):\r\n if path is None:\r\n raise ValueError('Can only temporarily establish a build root given a path.')\r\n prior = self._root_dir\r\n self._root_dir = path\r\n try:\r\n yield\r\n finally:\r\n self._root_dir = prior", "def set_workspace(self):\n try:\n chdir(self._path_temp) # assure we stay in the workspace\n except FileNotFoundError:\n raise MissingContextError(\"Context does not exist!\") from None", "def root_dir():\n assert root is not None\n\n old = _os.getcwd()\n try:\n _os.chdir(root)\n yield root\n finally:\n _os.chdir(old)", "def _use_temp_directory(self):\n if not self._is_temp_dir:\n self._orig_base_data_dir = self._base_data_dir\n self._orig_base_logs_dir = self._base_logs_dir\n temp_dir = Path(tempfile.mkdtemp())\n self._base_data_dir = temp_dir / \"data\"\n self._base_logs_dir = temp_dir / \"logs\"\n self.db.change_path(\":memory:\")\n self.set_current(\"default\", update=False)\n self._is_temp_dir = True\n return temp_dir", "def cd(self):\n if not self.exists():\n print 'ERROR: unable to cd to ' + self.get()\n try:\n os.chdir(self.get())\n except os.error as err:\n print 'ERROR: ' + str(err)\n return", "def chdir(self) -> None:\n os.chdir(self.analysis.get_bam_dir())", "def set_dir(self, d):\n mkdir_if_dne(d)\n old_workspace = self._wdir\n self._wdir = d\n if not d == old_workspace and old_workspace is not None:\n self.copy_contents(old_workspace)", "def reset_cache_dir(self):\n self.manager.reset_cache_dir()", "def change_base_dir():\n global base_dir\n while True:\n new_base = raw_input(\"New user directory? 
\")\n new_base = os.path.abspath(new_base)\n if os.path.exists(new_base):\n if os.path.isfile(new_base):\n print(\"ERROR: there is an existing file with that name.\")\n continue\n # make sure user can read and write this directory\n if not os.access(new_base, os.R_OK | os.W_OK):\n print(\"ERROR: directory access restricted\")\n continue\n print(\"OK: using existing directory\")\n break\n else:\n try:\n os.mkdir(new_base, 0700)\n except Exception:\n print(\"ERROR: directory creation failed\")\n continue\n print(\"OK: Created new directory.\")\n break\n\n from textwrap import wrap\n msg = wrap(\"\"\"WARNING: Your user files will be created in the directory\n'%(new_base)s' you have chosen. To access these files, you will either have\nto use the argument \"-dir:%(new_base)s\" every time you run the bot, or set\nthe environment variable \"PYWIKIBOT2_DIR\" equal to this directory name in\nyour operating system. See your operating system documentation for how to\nset environment variables.\"\"\" % locals(), width=76)\n for line in msg:\n print line\n ok = raw_input(\"Is this OK? ([yes], [N]o) \")\n if ok in [\"Y\", \"y\"]:\n base_dir = new_base\n return True\n print \"Aborting changes.\"\n return False", "def change_dir(dir, logger, throw_exception=True):\n\n logger.debug('ChgDir: '+dir+' (from '+str(os.getcwd())+')')\n\n status = os.chdir(dir)\n\n if status:\n if throw_exception:\n raise StopError('Problem changing to directory '+dir)\n else:\n logger.error('Problem changing to directory '+dir)", "def in_dir(directory):\n current_dir = os.getcwd()\n os.chdir(directory)\n\n # Add code that lets you handle errors\n try:\n yield\n # Ensure the directory is reset,\n # whether there was an error or not\n finally:\n os.chdir(current_dir)", "def set_path(self, directory):\n self.directory = directory", "def set_directory(self, directory):\n\t\tself.edit.set_text(directory)", "def init(cls, dirpath=\".\", newdir=True):\n dirpath = dirpath if dirpath[-1] == os.sep else dirpath + os.sep \n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n if newdir:\n now = datetime.datetime.now()\n dirpath = dirpath + now.strftime(\"%y-%m-%d_%H-%M-%S\") + os.sep\n os.mkdir(dirpath)\n cls.dirpath = dirpath\n cls.__data.clear()\n cls.__counters.clear()", "def open(self):\n if self.destination:\n _changeDirectory(self.destination)", "def SetPath(self, directory):\r\n\r\n if directory is not None and exists(directory) and isdir(directory):\r\n self.directory = directory" ]
[ "0.7734695", "0.75994277", "0.7524285", "0.74483794", "0.7443039", "0.7389849", "0.7278982", "0.71953887", "0.7141239", "0.7134059", "0.7131782", "0.70410705", "0.70233524", "0.7012039", "0.69601715", "0.695928", "0.69448084", "0.6925167", "0.6922496", "0.69155693", "0.69027984", "0.68911266", "0.6887719", "0.68865204", "0.688595", "0.6874555", "0.6867185", "0.68431413", "0.68321", "0.6829545", "0.68213606", "0.68107194", "0.67278516", "0.67127734", "0.6711246", "0.67046183", "0.6702546", "0.6686412", "0.6684906", "0.6672057", "0.6658128", "0.6657742", "0.6650049", "0.6647305", "0.6630243", "0.65744704", "0.65667135", "0.655198", "0.65280783", "0.6526947", "0.6505104", "0.6503215", "0.64935535", "0.64812994", "0.6434986", "0.6430269", "0.6422309", "0.6402994", "0.6380871", "0.637499", "0.637499", "0.6348914", "0.634363", "0.63311946", "0.63311946", "0.6306584", "0.6273239", "0.6269306", "0.6248413", "0.62275267", "0.6221681", "0.6208141", "0.6194908", "0.61693233", "0.6160267", "0.6153819", "0.6133241", "0.61146796", "0.61094934", "0.6089609", "0.60723925", "0.6068795", "0.60616547", "0.6035426", "0.60310555", "0.60154796", "0.600527", "0.6001623", "0.59889954", "0.598283", "0.59767944", "0.59668094", "0.59531385", "0.5946373", "0.5940786", "0.5931275", "0.59312344", "0.5916591", "0.5892796" ]
0.74489474
4
Ensure that a folder exists, creating it and any parent folders as necessary.
def create_folder(target_folder):
    try:
        os.makedirs(target_folder)
    except OSError as e:
        pass
    return os.path.exists(target_folder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ensure_folder(*arg):\n if len(arg) == 0:\n raise Exception(\"No input to ensure_folder\")\n path = get_dir(Path(*arg))\n path.mkdir(parents=True, exist_ok=True)", "def create_folder_if_needed(path):\n if os.path.exists(path):\n print(\"{} dir exists\".format(path))\n else:\n print(\"{} dir does not exist. Creating dir.\".format(path))\n os.mkdir(path)", "def ensure_folder_exists(folder_path: str) -> None:\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)", "def create_folder(path):\n folder_missing = not os.path.exists(path)\n\n if folder_missing:\n # Using makedirs since the path hierarchy might not fully exist.\n try:\n os.makedirs(path)\n except OSError as e:\n if (e.errno, e.strerror) == (17, 'File exists'):\n print(e)\n else:\n raise\n\n print('Created folder {0}'.format(path))\n\n return folder_missing", "def create_folder(path):\n if not exists(path):\n os.makedirs(path)", "def _create_folder(file_path):\r\n file_base = os.path.dirname(file_path)\r\n if not os.path.exists(file_base):\r\n try:\r\n os.makedirs(file_base)\r\n except OSError as e:\r\n if e.errno != errno.EEXIST:\r\n raise", "def _create_folder_if_not_exist(filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)", "def create_folder(folder):\n import errno\n try:\n os.makedirs(folder)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e", "def CreateFolderIfNotExisting(folder_path, communicator):\n if not os.path.isdir(folder_path) and communicator.MyPID() == 0:\n os.makedirs(folder_path)\n communicator.Barrier()", "def check_make(folder_check):\n if not os.path.isdir(folder_check):\n os.mkdir(folder_check)", "def create_folder(path: str):\n try:\n Path(path).mkdir(parents=True, exist_ok=True)\n return True\n except:\n print(\"An error occured.\")", "def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_folder(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)", "def create_folder(folder):\n if not os.path.exists(folder):\n os.makedirs(folder)\n logging.debug(\"Folder %s Created!\" % folder)\n else:\n logging.debug(\"Folder %s Exists!\" % folder)", "def folder_guard(folder_path):\n if not os.path.isdir(folder_path):\n print('INFO:folder_guard(): Creating folder: ' + folder_path + '...')\n os.mkdir(folder_path)", "def create_folder(folder):\n flag = True\n if not os.path.exists(folder):\n try:\n os.makedirs(folder)\n initlog('Folder path:%s created by me; ' % folder) \n except Exception, e:\n initlog('failed to create Folder path; %s' % str(e))\n flag = False\n return flag", "def create_folder(folder_name):\n\n try:\n os.makedirs(folder_name)\n except FileExistsError:\n pass", "def create_dir_if_necessary(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def ensure_dirs(cls, folder_path):\n try:\n cls.mkdirs(folder_path)\n except exceptions.PlotlyRequestError as e:\n if \"already exists\" in e.message:\n pass\n else:\n raise e", "def create_test_folder_if_does_not_exist(path):\n print('')\n if os.path.exists(path):\n print(' Skip creation of existing folder: {}'.format(path))\n else:\n print(' Create non-existing test folder: {}'.format(path))\n os.makedirs(path, mode=0o775)", "def ensure_dir(path):\n parent = os.path.dirname(path)\n if not os.path.exists(parent):\n os.makedirs(parent)", "def _check_directory(my_folder):\n if not os.path.exists(my_folder):\n os.makedirs(my_folder)", "def createFolder(folder):\n folder_ = os.path.join(os.getcwd(),folder)\n if 
not(os.path.isdir(folder_)):\n os.mkdir(folder_)", "def folder_guard(folder_path):\n \n if not os.path.isdir(folder_path):\n print('INFO:folder_guard(): Creating folder: ' + folder_path + '...')\n os.mkdir(folder_path)", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def ensure_dir_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def ensure_folders_if(path, condition=True):\n if not os.path.exists(path) and condition:\n os.makedirs(path)", "def create_folder(folder_path: str) -> None:\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)", "def create_folder(path):\n try:\n os.listdir(path)\n except:\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n return path", "def create_folder(name_folder: str):\n try:\n # Create a new direcctory\n os.mkdir(name_folder)\n except FileExistsError:\n # If the direcctory already exits print.\n print(f\"The directory {name_folder} already exists.\")", "def check_folder(filepath):\n if not os.path.exists(filepath):\n os.mkdir(filepath)\n return filepath", "def create_folder(path: str):\n if not os.path.exists(path):\n os.makedirs(path)", "def mkdir(folder_name: str) -> None:\n if exist(folder_name):\n print(\"The folder is already exist\")\n return \n\n os.mkdir(folder_name)", "def prepare_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)", "def _folderCheck(self, folder):\n logger.debug(\"Func: _folderCheck\")\n\n if not os.path.isdir(os.path.normpath(folder)):\n os.makedirs(os.path.normpath(folder))", "def create_folder(location: str):\n try:\n os.mkdir(location)\n except FileExistsError:\n pass", "def EnsureDirExists(path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError:\n pass", "def mkdir_if_not_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def maybe_makedirs(path_to_create):\n try: \n os.makedirs(path_to_create)\n except OSError:\n if not os.path.isdir(path_to_create):\n raise", "def check_folder(directory):\n global path_checked\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n path_checked = True", "def ensure_path(full_path):\n full_path = Path(full_path)\n if not full_path.exists():\n full_path.mkdir(parents=True, exist_ok=True)", "def create_folder_path(folder_path):\n try:\n if os.path.exists(folder_path):\n shutil.rmtree(folder_path)\n os.makedirs(folder_path)\n except Exception:\n raise Error('Create {folder_path} exception'.format(folder_path))", "def create_folder(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def create_folder(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def ensure_dir(dir_path):\n try:\n os.mkdir(dir_path)\n except FileExistsError:\n pass", "def _ensure_dir(dir_name):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)", "def create_directory(parent_path, new_folder):\n newdir = os.path.join(parent_path, new_folder)\n if os.path.isdir(newdir):\n return False\n else:\n os.mkdir(newdir)\n return True", "def ensure_dir(path):\n\n \n try:\n os.makedirs(path)\n except (EnvironmentError) as e:\n if not(e.errno == errno.EEXIST and \n e.filename == path):\n raise\n return", "def ensure_directory_exists(path):\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n 
os.makedirs(directory, exist_ok=True)\n return", "def _mkdir_if_not_exist(path):\n if not(os.path.isdir(path)):\n os.mkdir(path)\n else:\n _logger.info('Skipping existing directory %s' % path)", "def create_folder(folder):\n\n try:\n os.mkdir(folder, 0740)\n except OSError:\n return False\n else:\n return True", "def ensure_directory(path):\n\tdir_path = os.path.dirname(path)\n\tif os.path.exists(dir_path):\n\t\treturn\n\tensure_directory(dir_path)\n\ttry:\n\t\tos.mkdir(dir_path)\n\texcept OSError as e:\n\t\t# Ignore if EEXISTS. This is needed to avoid a race if two getters run at once.\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise", "def ensure_dir_exists(path: Union[str,Path]) -> None:\n# path = str(path)\n assert not os.path.isfile(path)\n os.makedirs(path, exist_ok=True)\n assert os.path.isdir(path)", "def _ensure_dir(directory):\r\n try:\r\n os.makedirs(directory)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise", "def ensure_dir_exists(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)", "def ensure_dirpath_exists(path: Path) -> Path:\n assert path\n out_path: Path = path\n\n if not out_path.exists():\n out_path.mkdir(parents=True, exist_ok=True)\n\n return out_path", "def ensure_directory(explorer, parent_id, dirname):\n cache_key = (parent_id, dirname)\n if cache_key in DIR_CACHE:\n return DIR_CACHE[cache_key]\n\n for folder in explorer.list_folder(parent_id):\n if folder['name'] == dirname:\n folder_id = folder['id']\n break\n else:\n print(\"Creating folder {!r} in parent {}\".format(dirname, parent_id))\n folder_id = explorer.create_folder(dirname, parent_id)\n DIR_CACHE[cache_key] = folder_id\n return folder_id", "def create_folder(self, unformatted_path):\n os.makedirs(self.format_path(unformatted_path), exist_ok=True)", "def maybe_makedirs(path_to_create):\n try:\n os.makedirs(path_to_create)\n except OSError:\n if not os.path.isdir(path_to_create):\n raise", "def make_dir_if_needed(path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n return path", "def create_folder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Error: The folder wasn\\'t able to be created: ' + directory)", "def _ensure_directory(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def create_folder(name):\n try:\n dirname = os.path.dirname(__file__)\n filename = os.path.join(dirname, name)\n os.makedirs(filename)\n return 0\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n return -1", "def _check_path(path):\n os.system(\"if [ ! 
-d \" + path + \" ]; then mkdir -p \" + path + \"; fi\")", "def create_folder(folders_to_create=[]):\n for f in folders_to_create:\n if not os.path.exists(f):\n os.makedirs(f)", "def check_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_directory(folder_name):\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)", "def create_dir():\n if check_dir_exist():\n return False\n else:\n os.makedirs(path_structure)\n return True", "def make_sure_path_exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def make_sure_path_exists(path):\n try: os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST: raise", "def _ensure_dir_exists(self, directory):\n directory = directory.strip()\n if not Path(directory).exists():\n os.mkdir(directory)", "def createFolder(directory) -> None:\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except Exception as error:\n print(f\"Error: createFolder({directory}) -> {error}\")", "def ensure_dir(d):\n\n if not os.path.exists(d):\n os.makedirs(d, exist_ok=True)\n\n return", "def _mkdir_if_not_exist(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(path):\n logger.warning(\n 'be happy if some process has already created {}'.format(\n path))\n else:\n raise OSError('Failed to mkdir {}'.format(path))", "def create_dir_if_doesnt_exist(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return", "def make_dir_if_needed(dir) :\n\tif not exists(dir) :\n\t\tos.makedirs(dir)", "def exist_ok_mkdir (path, mode=0777):\n try:\n os.mkdir (path, mode)\n except OSError:\n if not os.path.isdir (path):\n raise", "def ensure_dir(f):\n\td=os.path.dirname(f)\n\tif not os.path.exists(d):\n\t\tos.makedirs(d)", "def ensure_dir(root, path):\n full_path = root\n for seg in path.split(os.sep):\n full_path += os.sep + seg\n if os.path.exists(full_path):\n if not os.path.isdir(full_path):\n raise ValueError(\"'{}' is not a directory\".format(full_path))\n else:\n os.makedirs(full_path)", "def mkdir(path):\n\tif not Path(path).exists():\n\t\tPath(path).mkdir(parents=True, exist_ok=True)", "def assure_path_exists(self, path):\n\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)", "def ensuredir(path):\n # Copied from sphinx.util.osutil.ensuredir(): BSD licensed code, so it's OK\n # to add to this project.\n EEXIST = getattr(errno, 'EEXIST', 0)\n try:\n os.makedirs(path)\n except OSError as err:\n # 0 for Jython/Win32\n if err.errno not in [0, EEXIST]:\n raise", "def _check_or_create_dir(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)", "def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def ensure_dir( dirName ):\r\n if not os.path.exists( dirName ):\r\n os.makedirs( dirName )", "def ensure_dir(dir_):\n try:\n os.mkdir(dir_)\n except OSError:\n assert os.path.isdir(dir_)", "def ensure_dirs_exist(path):\n os.makedirs(path, exist_ok=True)", "def mkdir_if_notexists(path):\n try:\n os.mkdir(path)\n except FileExistsError:\n pass", "def create_folders(folder_name):\n\n if os.path.exists(downloads_path + '\\\\' + folder_name):\n pass\n else:\n os.makedirs(folder_name)\n print(f'Folder: {folder_name} has been created in {downloads_path}')", "def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except 
FileExistsError:\n pass\n else:\n pass", "def mkdir_if_missing(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def create_directory_if_not_exists(directory_path):\n os.makedirs(directory_path, exist_ok=True)", "def ifnotexistmkdir(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n return Path(directory)", "def check_path(dir_path):\n if not os.path.exists(dir_path):\n os.mkdir(dir_path, 0755)", "def mkdir(folder_path, exist_ok=True):\n\n # Generate dir\n os.makedirs(folder_path, exist_ok=exist_ok)\n\n return True", "def ensure_path(directory):\n if not path.exists(directory):\n os.makedirs(directory)\n # end if\n return directory", "def createFolder(folderFullPath):\n os.makedirs(folderFullPath, exist_ok=True)", "def exist_ok_makedirs (path, mode=0777):\n if not os.path.isdir (path):\n head, tail = os.path.split (path)\n if not tail:\n head, tail = os.path.split (head)\n if head and tail:\n exist_ok_makedirs (head, mode)\n exist_ok_mkdir (path, mode)", "def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)" ]
[ "0.82459253", "0.8094383", "0.8059282", "0.788112", "0.7779735", "0.7752992", "0.77248335", "0.77168036", "0.77027094", "0.76686084", "0.7639529", "0.76316696", "0.76276046", "0.7627525", "0.7594649", "0.7567482", "0.7552197", "0.75503033", "0.7529557", "0.7526078", "0.75254", "0.7512916", "0.7490926", "0.7458066", "0.7444984", "0.7444063", "0.7442587", "0.7422026", "0.741305", "0.7402993", "0.73841494", "0.73776454", "0.7368256", "0.7350468", "0.7343404", "0.73284996", "0.73280597", "0.73096055", "0.7301028", "0.7300524", "0.7294661", "0.7283377", "0.7275137", "0.7275137", "0.72726554", "0.7258377", "0.72576815", "0.7249299", "0.7245505", "0.72423774", "0.72401345", "0.7233169", "0.7230621", "0.7229388", "0.72276115", "0.721832", "0.7214569", "0.7208516", "0.7191891", "0.7187161", "0.7185111", "0.71689636", "0.7161454", "0.715934", "0.71543145", "0.7151872", "0.7141743", "0.7138145", "0.7137444", "0.7129204", "0.71253014", "0.7108062", "0.7104926", "0.70908177", "0.7080884", "0.70764124", "0.70741844", "0.7073596", "0.70734847", "0.7070537", "0.7059517", "0.70580304", "0.7049929", "0.70492524", "0.70486563", "0.7041085", "0.7037446", "0.70371246", "0.7037098", "0.7030301", "0.7027226", "0.7018647", "0.7014373", "0.70121783", "0.70119876", "0.6997651", "0.69945335", "0.6990592", "0.69841814" ]
0.7339701
36
A function converting CSV output files from operational_sep_quantities to JSON files for observations.
def obs_csv2json(input_file,output_file,example_path,instrument): obs_path = Path(cfg.obs_path) with open(example_path,'r') as e: example = js.load(e) #deleting unused categories del(example['sep_forecast_submission']['forecasts']) del(example['sep_forecast_submission']['triggers'][2]) del(example['sep_forecast_submission']['triggers'][1]) del(example['sep_forecast_submission']['triggers'][0]) del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument']) del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time']) del(example['sep_forecast_submission']['contacts']) del(example['sep_forecast_submission']['model']) del(example['sep_forecast_submission']['issue_time']) example['sep_forecast_submission']['mode'] = 'observation' #json template for observations obs_json = example fieldnames = ('energy_threshold','flux_threshold','start_time','intensity', 'peak_time','rise_time','end_time','duration','fluence>10', 'fluence>100') #extracting data from csv file with open(input_file,'r') as f: reader = csv.DictReader(f, fieldnames) out = js.dumps( [ row for row in reader ] ) obs_data = js.loads(out) data={} (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity'] ['observatory']) = instrument #creating data for all energy levels forecast for j in range(1,len(obs_data)): data[j-1]=obs_data[j] #recording start and end times for all events for i in range(len(data)): data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S') data[i]['start_time'] = data[i]['start_time'].isoformat() data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S') data[i]['end_time'] = data[i]['end_time'].isoformat() data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S') data[i]['peak_time'] = data[i]['peak_time'].isoformat() #recording observed values for all events if i > 0: (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity'] ['ongoing_events']).append({}) event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity'] ['ongoing_events'][i]) #start and end times event['start_time']=data[i]['start_time'] event['threshold'] = data[i]['flux_threshold'] event['energy_min'] = float(data[i]['energy_threshold'][1:]) event['energy_max'] = -1 event['end_time']=data[i]['end_time'] #peak values event['peak_intensity']=data[i]['intensity'] event['peak_time'] = data[i]['peak_time'] event['intensity_units']='pfu' #fluence values event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value', 'units' : 'MeV [cm^-2]'}, {'energy_min' : '100', 'fluence_value' : 'fluence_value', 'units' : 'MeV [cm^-2]'}] event['fluence'][0]['fluence']=data[i]['fluence>10'] event['fluence'][1]['fluence']=data[i]['fluence>100'] if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index (int(event['energy_min']))]: event['all_clear_boolean'] = 'false' else: event['all_clear_boolean'] = 'true' #building json file with open(obs_path / output_file, 'w') as s: js.dump(obs_json,s,indent=1) print('json file %s created' %output_file) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. there was an error), write the error id and error message\n # note : msg is the short message (eg. 
PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". 
Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def metrics_to_json(metrics_csv_int, region, coords, metrics_filename):\n data = {}\n with open(metrics_csv_int,'r') as f:\n reader = csv.reader(f)\n fields = next(reader)\n for row in reader:\n data[row[0]] = {\"Temporal intermittency\": {},\n \"Spatial intermittency\": {}}\n # skip the first key in fields, clean up field name\n for i,field in enumerate(fields[1:6]):\n data[row[0]][\"Temporal intermittency\"].update({field[5:]:float(row[i+1])})\n for i,field in enumerate(fields[6:]):\n data[row[0]][\"Spatial intermittency\"].update({field[3:]:float(row[i+6])})\n with open(metrics_filename, 'r') as fname:\n metrics = json.load(fname)\n\n # Add region to dimensions information\n metrics['DIMENSIONS']['dimensions']['region'].update({region: coords})\n\n # Update model statistics\n for model in data:\n if not (model in metrics['RESULTS']):\n metrics['RESULTS'][model] = {}\n metrics['DIMENSIONS']['dimensions']['dataset'].update({model: {}})\n metrics['RESULTS'][model][region] = data[model]\n\n # Write new metrics to same file\n with open(metrics_filename, 'w') as fname:\n json.dump(metrics,fname,indent = 2)", "def _json_export(self, exppath):\n # TODO: Settle on JSON format for colortable\n pass", "def test_csv_to_json():\r\n json_dict = {\r\n \"covariates\":{ \r\n \"value\":{\r\n \"subject0\": {\r\n \"attribute0\": 3.0,\r\n \"attribute1\": 12.0\r\n },\r\n \"subject1\": {\r\n \"attribute0\": 1.2,\r\n \"attribute1\": 10.9\r\n }\r\n }\r\n },\r\n \"data\":{\r\n \"fulfilled\": True,\r\n \"value\": {\r\n \"type\": [\"float\"],\r\n \"value\": [\r\n \"attribute0\",\r\n \"attribute1\"\r\n ]\r\n }\r\n },\r\n \"lambda\":{\r\n \"fulfilled\": True,\r\n \"value\": 0\r\n }\r\n }\r\n json_string = \"[\" + json.dumps(json_dict).replace(' ', '').replace('\\n', '') + \"]\"\r\n directory = os.path.join(os.getcwd(), \"test/\")\r\n lambda_ = \"0\"\r\n data_type = [\"float\"]\r\n data_vars = [\"attribute0\", \"attribute1\"]\r\n assert csv_to_json_(directory, lambda_, data_type, data_vars).replace(' ', '').replace('\\n', '') == json_string", "def main(input_filepath, output_filepath):\n productsDict = dataToDict(input_filepath)\n productsList = dictToCSV(productsDict)\n toCSV(productsList, output_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def output_to_cwl_json(\n galaxy_output, get_metadata, get_dataset, get_extra_files, pseduo_location=False,\n):\n def element_to_cwl_json(element):\n element_output = GalaxyOutput(\n galaxy_output.history_id,\n element[\"object\"][\"history_content_type\"],\n element[\"object\"][\"id\"],\n )\n return output_to_cwl_json(element_output, get_metadata, get_dataset, get_extra_files)\n\n 
output_metadata = get_metadata(galaxy_output.history_content_type, galaxy_output.history_content_id)\n\n def dataset_dict_to_json_content(dataset_dict):\n if \"content\" in dataset_dict:\n return json.loads(dataset_dict[\"content\"])\n else:\n with open(dataset_dict[\"path\"]) as f:\n return json.load(f)\n\n if output_metadata[\"history_content_type\"] == \"dataset\":\n ext = output_metadata[\"file_ext\"]\n assert output_metadata[\"state\"] == \"ok\"\n if ext == \"expression.json\":\n dataset_dict = get_dataset(output_metadata)\n return dataset_dict_to_json_content(dataset_dict)\n else:\n file_or_directory = \"Directory\" if ext == \"directory\" else \"File\"\n if file_or_directory == \"File\":\n dataset_dict = get_dataset(output_metadata)\n properties = output_properties(pseduo_location=pseduo_location, **dataset_dict)\n basename = properties[\"basename\"]\n extra_files = get_extra_files(output_metadata)\n found_index = False\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == SECONDARY_FILES_INDEX_PATH:\n found_index = True\n\n if found_index:\n ec = get_dataset(output_metadata, filename=SECONDARY_FILES_INDEX_PATH)\n index = dataset_dict_to_json_content(ec)\n for basename in index[\"order\"]:\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == os.path.join(SECONDARY_FILES_EXTRA_PREFIX, basename):\n ec = get_dataset(output_metadata, filename=path)\n if not STORE_SECONDARY_FILES_WITH_BASENAME:\n ec[\"basename\"] = basename + os.path.basename(path)\n else:\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n if \"secondaryFiles\" not in properties:\n properties[\"secondaryFiles\"] = []\n\n properties[\"secondaryFiles\"].append(ec_properties)\n else:\n basename = output_metadata.get(\"cwl_file_name\")\n if not basename:\n basename = output_metadata.get(\"name\")\n\n listing = []\n properties = {\n \"class\": \"Directory\",\n \"basename\": basename,\n \"listing\": listing,\n }\n\n extra_files = get_extra_files(output_metadata)\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n ec = get_dataset(output_metadata, filename=path)\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n listing.append(ec_properties)\n\n return properties\n\n elif output_metadata[\"history_content_type\"] == \"dataset_collection\":\n if output_metadata[\"collection_type\"] == \"list\":\n rval = []\n for element in output_metadata[\"elements\"]:\n rval.append(element_to_cwl_json(element))\n elif output_metadata[\"collection_type\"] == \"record\":\n rval = {}\n for element in output_metadata[\"elements\"]:\n rval[element[\"element_identifier\"]] = element_to_cwl_json(element)\n return rval\n else:\n raise NotImplementedError(\"Unknown history content type encountered\")", "def convert_to_dict_then_json(row, sep,feature_list):\n feature_values = row.decode('utf-8').replace('\\n', '').replace('\\r', '').split(sep)\n feature_values_clean = [float(x) if is_number(x) else 0 for x in feature_values]\n feat_dict = dict(zip(feature_list, feature_values_clean))\n feat_json = json.dumps(feat_dict).encode('utf-8')\n return(feat_json)", "def _export_jql_items(items, output_file, format='json', compress=False):\n if format == 'json':\n Mixpanel.export_data(items, output_file, format=format, compress=compress)\n elif format 
== 'csv':\n with open(output_file, 'w') as f:\n f.write(items)\n if compress:\n Mixpanel._gzip_file(output_file)\n os.remove(output_file)\n else:\n Mixpanel.LOGGER.warning('Invalid format must be either json or csv, got: ' + format)\n return", "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return", "def df_to_json(complete_dataset, output_path, static_columns):\n megajson = {}\n\n static_columns = [\"continent\", \"location\"] + list(static_columns)\n\n complete_dataset = complete_dataset.dropna(axis=\"rows\", subset=[\"iso_code\"])\n\n for _, row in complete_dataset.iterrows():\n\n row_iso = row[\"iso_code\"]\n row_dict_static = row.drop(\"iso_code\")[static_columns].dropna().to_dict()\n row_dict_dynamic = row.drop(\"iso_code\").drop(static_columns).dropna().to_dict()\n\n if row_iso not in megajson:\n megajson[row_iso] = row_dict_static\n megajson[row_iso][\"data\"] = [row_dict_dynamic]\n else:\n megajson[row_iso][\"data\"].append(row_dict_dynamic)\n\n with open(output_path, \"w\") as file:\n file.write(json.dumps(megajson, indent=4))", "def export_sampleStorage_csv(self, sample_ids_I, filename_O):\n\n data_O = [];\n for sample_id in sample_ids_I:\n data_tmp =[];\n data_tmp = self.get_rows_sampleID_limsSampleStorage(sample_id);\n data_O.extend(data_tmp);\n if data_O:\n io = base_exportData(data_O);\n io.write_dict2csv(filename_O);", "def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added", "def csv_to_json(name: str) -> str:\n with open(name) as file: # type: IO[str]\n result: Dict[str, Any] = {}\n for row in DictReader(file):\n item = row[\"Item\"].split(\"(\")[0].strip()\n size = row[\"Serving Size\"].rstrip(\")\").replace(\"(\", \"/ \")\n result[f\"{item} [{size}]\"] = row[\"Calories\"]\n return dumps(result)", "def transform2json(source, target):\n behaviors = pd.read_table(\n 
source,\n header=None,\n names=['uid', 'time', 'clicked_news', 'impression'])\n f = open(target, \"w\")\n with tqdm(total=len(behaviors), desc=\"Transforming tsv to json\") as pbar:\n for row in behaviors.itertuples(index=False):\n item = {}\n item['uid'] = row.uid[1:]\n item['time'] = row.time\n item['impression'] = {\n x.split('-')[0][1:]: int(x.split('-')[1])\n for x in row.impression.split()\n }\n f.write(json.dumps(item) + '\\n')\n\n pbar.update(1)\n\n f.close()", "def build_csv_write(api):\n\n write_rows = []\n for info in api:\n write_rows.append([info[\"number\"], info[\"status\"], info[\"available_bike_stands\"],\n info[\"available_bikes\"], time])\n\n return write_rows", "def Export_in_files(COVID_data, COVID_data_reconstructed):\r\n F_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')\r\n FR_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')\r\n \r\n COVID_data_lists = [COVID_data, COVID_data_reconstructed]\r\n Data_file_list = [F_data_file, FR_data_file]\r\n Countries_list = list(COVID_data.keys())[1:]\r\n \r\n for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file\r\n COVID_data_temp = COVID_data_lists[Data_set_inc]\r\n Data_file_temp = Data_file_list[Data_set_inc]\r\n \r\n Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\\n')\r\n \r\n for Country in Countries_list:\r\n COVID_data_single_country = COVID_data_temp[Country]\r\n \r\n Date_list = list(COVID_data[Country].keys())\r\n for Date in Date_list:\r\n COVID_data_single_country_single_date = COVID_data_single_country[Date]\r\n Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None\r\n \r\n Data_file_temp.write('%s;%s;' % (Country, Date))\r\n Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))\r\n Data_file_temp.write('\\n')\r\n \r\n Data_file_temp.close()", "def write_to_csv(self, name_suffix = ''):\n f_path = os.path.join(self.root_dir, 'res' + name_suffix + '.csv')\n field_names = [] # the first field in CSV is 'obj_val'\n\n # put the keys in the cost, prim_var_change, dual_var_change and fea_conditions as field names if any\n for key in self.cost.keys():\n field_names.append(key)\n for key in self.cost_change.keys():\n field_names.append(key)\n for key in self.prim_var_change.keys():\n field_names.append(key)\n for key in self.dual_var_change.keys():\n field_names.append(key)\n for key in self.fea_conditions.keys():\n field_names.append(key)\n\n\tprint f_path\n\n with open(f_path, mode = 'wb') as csv_file: # open the file, if not exist, create it\n writer = csv.DictWriter(csv_file, fieldnames = field_names) # create a writer which maps the dictionaries onto output rows in CSV\n writer.writeheader() # write the field names to the header\n temp_dict = {} # create a temporary dict used to output rows\n row_max = self.get_iter_num() # get the max iters which indicates the number of rows in CSV\n print ('number of rows: ' + str(row_max))\n #print (field_names)\n for row in range(row_max + 1):\n temp_dict.clear() # clear all items\n start_idx = 0\n for i in range(len(self.cost)):\n field = field_names[start_idx + i]\n\t\t if row > len(self.cost[field]) - 1:\n\t\t\ttemp_dict[field] = 
''\n\t\t else: temp_dict[field] = self.get_cost_val(field, row)\n\n start_idx = start_idx + len(self.cost) # the start pos of fields in field_names for prim_var_change\n for i in range(len(self.cost_change)): # for each cost_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n elif row > len(self.cost_change[field]) - 1:\n\t\t\t temp_dict[field] = ''\n\t\t else:\n temp_dict[field] = self.get_cost_change_value(field, row - 1)\n\n\n start_idx = start_idx + len(self.cost_change)\n for i in range(len(self.prim_var_change)): # for each prim_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n\t\t elif row > len(self.prim_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else:\n temp_dict[field] = self.get_prim_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.prim_var_change) # go to the start pos of fields in field_names for dual_var_change\n for i in range(len(self.dual_var_change)): # for each dual_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of dual variables\n temp_dict[field] = '/'\n elif row > len(self.dual_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = '' \n\t\t else:\n temp_dict[field] = self.get_dual_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.dual_var_change) # go the the start pos of fields in field_names for fea_conditions\n for i in range(len(self.fea_conditions)): # for each fea_condition\n field = field_names[start_idx + i]\n\t\t if row > len(self.fea_conditions[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else: temp_dict[field] = self.get_fea_condition_value(field, row)\n\n writer.writerow(temp_dict)\n\n # we also save the value of primal values if not saved\n if not self.pdv_to_csv:\n self.save_last_prims()", "def main(input_filepath, output_filepath, data_type):\n a = jsonCSV(input_filepath, os.path.join(output_filepath, data_type+'.csv'))\n column_names = a.get_superset_column_names()\n a.read_write(column_names)\n\n logger = logging.getLogger(__name__)\n logger.info('transform log files into csv')", "def opf2json(opf_path: str, output_file: Optional[str] = None) -> None:\n\n logger.info(\"Converting file: %s ...\", opf_path)\n\n header_format = \"<iii\"\n header_size = struct.calcsize(header_format)\n\n with open(opf_path, \"rb\") as f:\n header_data = struct.unpack(header_format, f.read(header_size))\n\n n_samples = header_data[0]\n n_features = header_data[2]\n\n file_format = \"<ii\"\n for _ in range(n_features):\n file_format += \"f\"\n\n data_size = struct.calcsize(file_format)\n\n json = {\"data\": []}\n for _ in range(n_samples):\n data = struct.unpack(file_format, f.read(data_size))\n\n # Note that we subtract 1 from `labels` column\n json[\"data\"].append(\n {\"id\": data[0], \"label\": data[1] - 1, \"features\": list(data[2:])}\n )\n\n if not output_file:\n output_file = opf_path.split(\".\")[0] + \".json\"\n\n with open(output_file, \"w\") as f:\n j.dump(json, f)\n\n logger.info(\"File converted to %s.\", output_file)", "def encode_to_raw_json(self, feature_collection, csv_f):\n clean_name = str(path.splitext(csv_f)[0]) + \".json\"\n with open(path.join(self.uk_postcodes, clean_name), \"wb\") as json_outfile:\n dump(feature_collection, json_outfile)", "def result2json(ifilename, poiname, ofilename):\n nameMap = {\n \"SysWeight1\" : 
\"mc\",\n \"SysWeight2\" : \"FSR\",\n \"SysWeight3\" : \"bkg\",\n \"SysWeight4\" : \"tagpt\",\n \"SysWeight6\" : \"Prefire\",\n \"SysRecoil2\" : \"recoil_eta\",\n \"SysRecoil3\" : \"recoil_keys\",\n \"SysRecoil6\" : \"recoil_stat0\",\n \"SysRecoil7\" : \"recoil_stat1\",\n \"SysRecoil8\" : \"recoil_stat2\",\n \"SysRecoil9\" : \"recoil_stat3\",\n \"SysRecoil10\": \"recoil_stat4\",\n \"SysRecoil11\": \"recoil_stat5\",\n \"SysRecoil12\": \"recoil_stat6\",\n \"SysRecoil13\": \"recoil_stat7\",\n \"SysRecoil14\": \"recoil_stat8\",\n \"SysRecoil15\": \"recoil_stat9\",\n }\n\n def getNuisName(nuis):\n if nuis in nameMap.keys():\n return nameMap[nuis]\n elif bool(re.match(r\"\\w*bin\\d+shape\", nuis)):\n return \"QCD_\" + nuis\n else:\n return nuis\n\n ifile = ROOT.TFile(ifilename)\n himpact = ifile.Get(\"nuisance_impact_mu\")\n himpact_grouped = ifile.Get(\"nuisance_group_impact_mu\")\n tree = ifile.Get(\"fitresults\")\n tree.GetEntry(0)\n\n # find the POI bin for poiname\n ibinX = -1\n for binX in range(1, himpact.GetNbinsX()+1):\n poi = himpact.GetXaxis().GetBinLabel(binX)\n if poi == poiname:\n ibinX = binX\n continue\n assert ibinX >=0, \"Can not find the POI {} in the postfit file {}. Please check.\".format(poiname, ifilename)\n\n results = OrderedDict()\n results['POIs'] = []\n val = getattr(tree, poiname)\n err = abs(getattr(tree, poiname+\"_err\"))\n poi = OrderedDict()\n poi['fit'] = [val-err, val, val+err]\n poi['name'] = poiname\n results['POIs'].append(poi)\n\n results['method'] = 'default'\n results['params'] = []\n\n # dump impacts\n impacts = OrderedDict()\n for ibinY in range(1, himpact.GetNbinsY()+1):\n nuis = himpact.GetYaxis().GetBinLabel(ibinY)\n impacts[nuis] = himpact.GetBinContent(ibinX, ibinY)\n\n # add the grouped QCD and Recoil systematic\n groupnames = []\n for ibinY in range(1, himpact_grouped.GetNbinsY()+1):\n tmpY = himpact_grouped.GetYaxis().GetBinLabel(ibinY)\n if tmpY == 'stat':\n continue\n impacts[tmpY] = himpact_grouped.GetBinContent(ibinX, ibinY)\n groupnames.append(tmpY)\n\n # sort impacts, descending\n impacts = OrderedDict(sorted(impacts.items(), key=lambda x: abs(x[1]), reverse=True))\n\n pulls = OrderedDict()\n for nuis in impacts.keys():\n if nuis not in groupnames:\n val = getattr(tree, nuis)\n err = getattr(tree, nuis+\"_err\")\n err = abs(err)\n else:\n # manually set the postfit of the grouped sys to [-1,1], and pulled at 0,\n # since only the impacts are useful to us\n val = 0.\n err = 1.\n pulls[nuis] = [val - err, val, val + err]\n\n # save to results\n for nuis in impacts.keys():\n systematic = OrderedDict()\n systematic['fit'] = pulls[nuis]\n systematic['groups'] = []\n systematic['impact_' + poiname] = impacts[nuis]\n systematic['name'] = getNuisName(nuis)\n systematic['prefit'] = [-1.0, 0., 1.0]\n systematic[poiname] = [poi['fit'][1] - impacts[nuis], poi['fit'][1], poi['fit'][1] + impacts[nuis]]\n systematic['type'] = \"Gaussian\"\n print(getNuisName(nuis), pulls[nuis][1], pulls[nuis][1]-pulls[nuis][0], impacts[nuis])\n\n results['params'].append(systematic)\n\n with open(ofilename, 'w') as fp:\n json.dump(results, fp, indent=2)", "def write2json(output, in_data):\n print(\"Writeing \" + output)\n with open(output, 'w') as f:\n json.dump(in_data, f, indent=4, sort_keys=True)", "def out_put_data(OOS_result: dir, category: str) -> pandas.core.frame.DataFrame:\n \n header = ['SKU', 'Store', 'category', 'OOS_days', 'date_list', 'OOS_lastDay','avg_loss_sale_quantity',\n 'avg_loss_net_sale','avg_loss_mergin', 
'total_loss_sale_quantity','total_loss_net_sale','total_loss_mergin']\n output_data = pd.DataFrame(columns = header)\n new_row = {}\n \n for key, value in OOS_result.items():\n new_row['Store'] = key[1]\n new_row['SKU'] = key[0]\n new_row['Category'] = category\n new_row['OOS_days'] = value[0]\n new_row['date_list'] = value[5]\n new_row['OOS_lastDay'] = value[4]\n new_row['avg_loss_sale_quantity'] = value[3]\n new_row['avg_loss_net_sale'] = value[2]\n new_row['avg_loss_mergin'] = value[1]\n new_row['total_loss_sale_quantity'] = value[3] *value[0]\n new_row['total_loss_net_sale'] = value[2] *value[0]\n new_row['total_loss_mergin'] = value[1] *value[0]\n \n ## insert the new row \n output_data = output_data.append(new_row, ignore_index=True) \n return output_data", "def main():\n try:\n kerbals_csv = pd.read_csv(\"kerbals.csv\")\n except FileNotFoundError:\n print(\"Kerbals csv file not found in current directory!\")\n sys.exit(1)\n kerbals_csv.to_json(\"kerbals.json\", orient=\"records\")\n kerbals_json = open(\"kerbals.json\")\n print(kerbals_json.read())\n return 0", "def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return", "def put_str_repr_on_csv():\n for file_name in tqdm(os.listdir(\"./raw_data\")):\n file = pathlib.Path(f\"./raw_data/{file_name}\")\n if file.suffix == \".csv\" and \"str\" not in file.stem:\n df = pd.read_csv(\"./raw_data/\"+file_name)\n df['jones_str'] = df['Jones_polynomial'].apply(lambda x: eval(x)).transform(lambda x: poly_to_str(x))\n df['alexander_str'] = df['Alexander_polynomial'].apply(lambda x: eval(x)).transform(lambda x: poly_to_str(x))\n df.to_csv(f\"{file.stem}_str.csv\", index=False)", "def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]", "def _getCSVForPerField(self, statistic):\n\n rows = []\n\n chart_json = simplejson.loads(statistic.chart_json)\n description = chart_json['description'] \n header = []\n for item in description:\n header.append(item[-1].encode('utf-8'))\n rows.append(header)\n\n final_stat = simplejson.loads(statistic.final_json)\n for choice, result in final_stat.iteritems():\n row = []\n row.append(unicode(choice).encode('utf-8'))\n for item in result:\n row.append(unicode(item).encode('utf-8'))\n rows.append(row)\n\n return rows", "def 
test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)", "def gp_file(data,filename,output_dir='',order = [],head = False):\n f = open(output_dir + filename + '.csv', 'w')\n f.write(str(len(order)-1) + '\\n')\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n f.closed\n\n return None", "def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename", "def save_instrument_lists(\n instruments: List[pd.DataFrame],\n directory: str = DATA_DICTIONARY_DIR):\n check_data_dir(directory)\n ext: str = 'json' # 'csv'\n\n # Save our order.json\n instrument_order: List[str] = [\n x['form_name'].iloc[0] for x in instruments\n ]\n with 
open(f'{directory}/order.json', 'w') as jo:\n json.dump(instrument_order, fp=jo, indent=4)\n\n for instrument_df in instruments:\n instrument_name: str = list(set(instrument_df['form_name']))[0]\n instrument_file: str = f'{directory}/{instrument_name}.{ext}'\n json_data = instrument_df.to_json(orient='records')\n dict: Dict = json.loads(json_data)\n json_data = json.dumps(dict, indent=4)\n filehandle: TextIO = open(instrument_file, 'w')\n filehandle.write(json_data)\n filehandle.close()\n # json_to_csv(json_data, instrument_csv_file)", "def to_multiple_jsons(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('multiple_jsons')\n else:\n self.output('multiple_jsons')", "def writeCSV(csvPath, usedmpicommands, first_table_values,second_table_values,third_table_values, df):\n\n print(\"Saving CSV files in directory '\" + os.path.realpath(csvPath) +\"'\")\n\n #routine Summary by rank metrics table\n metric_csv_table = df.to_csv(sep=';')\n with open(os.path.join(csvPath,'routineSummaryByRank_metric_table.csv'), 'w') as outfileMetricTable:\n outfileMetricTable.write(metric_csv_table)\n outfileMetricTable.close()\n\n #routine Summary by rank data table (just the data from the instrumenation file in csv format)\n with open(os.path.join(csvPath,'routineSummaryByRank_summary.csv'), 'w') as outfileMPICommands:\n wr = csv.writer(outfileMPICommands, delimiter=';')\n wr.writerows(usedmpicommands)\n outfileMPICommands.close()\n\n #application Summary by rank data (first table)\n #Columns: \"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_1st_table.csv'), 'w') as outfile_first_table:\n wr = csv.writer(outfile_first_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"])\n wr.writerows(first_table_values)\n outfile_first_table.close()\n \n #application Summary by rank data (second table) \n #Columns: \"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_2st_table.csv'), 'w') as outfile_second_table:\n wr = csv.writer(outfile_second_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"])\n wr.writerows(second_table_values)\n outfile_second_table.close()\n\n #application Summary by rank data (third table)\n #Columns: \"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_3rd_table.csv'), 'w') as outfile_third_table:\n wr = csv.writer(outfile_third_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"])\n wr.writerows(third_table_values)\n outfile_third_table.close()\n\n #In case, you are wondering, where the last part of the instrumentation file is (message Summary by rank),\n #it is currently not saved as a csv file. 
This is because:\n #\n #1st: In the platform_mpi instrumentation file, the data is somehow visualized beautifully\n #2nd: It is very hard to save the data in a 2-dimensional csv file format\n #Therefore we decided, not to export this data in a csv file format", "def create_cris_data_dict(df, filename, outdir):\n json_dict = {}\n for key in df.keys():\n if key != 'participant_id':\n json_dict[key] = {'Units': key.split()[-1]}\n else:\n json_dict[key] = {'Description': 'OpenNeuro ID of the subject.'}\n with open(outdir.joinpath(filename + '.json'), \"w\") as f:\n json.dump(json_dict, f, indent=4)", "def formatJSON(csvpath, jsonfilepath):\n\n data = {}\n my_list = []\n with open(path) as file:\n csvReader = csv.DictReader(file)\n for csvRow in csvReader:\n\n data = csvRow\n my_list.append(data)\n\n \"\"\"\n\n Write retrieved data into a json file\n NOTE: json file is automatically created when code is run from terminal\n and updates each time it run again.\n \"\"\"\n\n\n with open(jsonfilepath,\"w\") as jsonfile:\n\n jsonfile.write(json.dumps(my_list,indent=4))", "def calculated_data_to_csv(transmissivity_calculated, conductivity_calculated,\n confirmed_wells, feature_class_name):\n utm_e = [i[0][0] for i in confirmed_wells]\n utm_n = [i[0][1] for i in confirmed_wells]\n np.set_printoptions(suppress=True) #removes scientific notation\n location = np.array([utm_e, utm_n])\n location = location.transpose()\n transmissivity_calculated = np.array(transmissivity_calculated)\n conductivity_calculated = np.array(conductivity_calculated)\n joined_data = np.concatenate((location, transmissivity_calculated, conductivity_calculated), axis = 1)\n my_df = pd.DataFrame(joined_data)\n header_list = ['UTME', 'UTMN', 'T_min', 'T_raw', 'T_max', 'K_min', 'K_raw', 'K_max', 'Well ID']\n raw_csv_name = f\"{feature_class_name}.csv\"\n my_df.to_csv(raw_csv_name, index = False, header = header_list)\n return my_df, raw_csv_name", "def csv_write (data):\n \n csv_data=data[0:]\n csv1_data = open('backup.csv', 'a')\n csvwriter = csv.writer(csv1_data)\n\n count = 0\n\n for i in csv_data:\n if count == 0:\n header = i.keys()\n csvwriter.writerow(header)\n count += 1\n csvwriter.writerow(i.values())\n\n csv1_data.close()\n\n #http://blog.appliedinformaticsinc.com/how-to-parse-and-convert-json-to-csv-using-python/", "def get_operators(city):\n\n\tresult = {}\n\tdf = pd.read_csv(output_file, sep = ';')\n\n\t#filter by city\n\tdf = df.loc[df['city'] == city]\n\n\tif 0 == len(df):\n\t\tmsg = \"No operator data available in our database for the requested address!\"\n\t\treturn jsonify(message = msg)\n\telse:\n\t\tfor index in df.index:\n\t\t\toperateur = df['Operateur'][index]\n\t\t\toperateur_name = operators_id[str(operateur)]\n\t\t\ttwo_G = 'True' if df['2G'][index] == 1 else 'False'\n\t\t\ttree_G = 'True' if df['3G'][index] == 1 else 'False'\n\t\t\tfour_G = 'True' if df['4G'][index] == 1 else 'False'\n\t\t\tresult[operateur_name] = {\"2G\": two_G, \"3G\": tree_G, \"4G\": four_G}\n\n\t\treturn result", "def create_csv(json_file):\n with open('json_data.csv', 'w', newline='') as json_data:\n filewriter = csv.writer(json_data, delimiter=',')\n filewriter.writerow(['timestamp', 'open',\n 'high', 'low', 'close', 'volume'])\n for data in json_file:\n timestamp = data[\"date\"]\n open_price = data[\"opening\"]\n high_price = data[\"high\"]\n low_price = data[\"low\"]\n close_price = data[\"closing\"]\n volume = data[\"volume\"]\n filewriter.writerow([timestamp, open_price, high_price,\n low_price, close_price, volume])", "def 
main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n df = create_data_frame(input_filepath)\n process_columns(df)\n logger.info(df.head())\n df.to_csv(output_filepath, index=False)", "def _write_local_data_files(self, cursor):\n schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))\n file_no = 0\n tmp_file_handle = NamedTemporaryFile(delete=True)\n tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}\n\n # Save file header for csv if required\n if(self.export_format['file_format'] == 'csv'):\n\n # Deal with CSV formatting. Try to use dialect if passed\n if('csv_dialect' in self.export_format):\n # Use dialect name from params\n dialect_name = self.export_format['csv_dialect']\n else:\n # Create internal dialect based on parameters passed\n dialect_name = 'mysql_to_gcs'\n csv.register_dialect(dialect_name,\n delimiter=self.export_format.get('csv_delimiter') or\n ',',\n doublequote=self.export_format.get(\n 'csv_doublequote') or\n 'True',\n escapechar=self.export_format.get(\n 'csv_escapechar') or\n None,\n lineterminator=self.export_format.get(\n 'csv_lineterminator') or\n '\\r\\n',\n quotechar=self.export_format.get('csv_quotechar') or\n '\"',\n quoting=eval(self.export_format.get(\n 'csv_quoting') or\n 'csv.QUOTE_MINIMAL'))\n # Create CSV writer using either provided or generated dialect\n csv_writer = csv.writer(tmp_file_handle,\n encoding='utf-8',\n dialect=dialect_name)\n\n # Include column header in first row\n if('csv_columnheader' in self.export_format and\n eval(self.export_format['csv_columnheader'])):\n csv_writer.writerow(schema)\n\n for row in cursor:\n # Convert datetimes and longs to BigQuery safe types\n row = map(self.convert_types, row)\n\n # Save rows as CSV\n if(self.export_format['file_format'] == 'csv'):\n csv_writer.writerow(row)\n # Save rows as JSON\n else:\n # Convert datetime objects to utc seconds, and decimals to floats\n row_dict = dict(zip(schema, row))\n\n # TODO validate that row isn't > 2MB. 
BQ enforces a hard row size of 2MB.\n s = json.dumps(row_dict, sort_keys=True)\n if PY3:\n s = s.encode('utf-8')\n tmp_file_handle.write(s)\n\n # Append newline to make dumps BigQuery compatible.\n tmp_file_handle.write(b'\\n')\n\n # Stop if the file exceeds the file size limit.\n if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:\n file_no += 1\n tmp_file_handle = NamedTemporaryFile(delete=True)\n tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle\n\n # For CSV files, weed to create a new writer with the new handle\n # and write header in first row\n if(self.export_format['file_format'] == 'csv'):\n csv_writer = csv.writer(tmp_file_handle,\n encoding='utf-8',\n dialect=dialect_name)\n if('csv_columnheader' in self.export_format and\n eval(self.export_format['csv_columnheader'])):\n csv_writer.writerow(schema)\n\n return tmp_file_handles", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def save_json_file():\n global output_on_display, import_lst, column_names, data, new_data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = new_data\n step = len(column_names)\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"JSON\", \"*.json\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.json')\n data = import_lst\n\n if len(data[0]) == step:\n pass\n else:\n data = import_lst[step::]\n\n data2 = list(map(list, zip(*data)))\n\n data3 = {key: value for key, value in zip(column_names, data2)}\n\n column = list(data3.keys())\n\n df = pd.DataFrame(data3, columns=column)\n\n data_dict = df.to_dict(orient=\"records\")\n with open(save_name, \"w+\") as f:\n json.dump(data_dict, f, indent=4)\n\n data.clear()\n data2.clear()\n data3.clear()", "def batch_run_cfg2json():\n cfg_path = os.environ.get(\"CFG_FILE_PATH\")\n cfg_list = ['any_n1.cfg',\n 'ir_grism_n2.cfg',\n 'ir_grism_n4.cfg',\n 'ir_any_n2.cfg',\n 'ir_any_n4.cfg',\n 'uvis_any_n2.cfg',\n 'uvis_any_n4.cfg',\n 'uvis_any_n6.cfg',\n 'uvis_any_pre2012_n2.cfg',\n 'uvis_any_pre2012_n4.cfg',\n 'uvis_any_pre2012_n6.cfg',\n 'wfc_any_n2.cfg',\n 'wfc_any_n4.cfg',\n 'wfc_any_n6.cfg',\n 'sbc_blue_n2.cfg',\n 'sbc_blue_n6.cfg',\n 'sbc_any_n2.cfg',\n 'sbc_any_n6.cfg',\n 'hrc_any_n2.cfg',\n 'hrc_any_n4.cfg',\n 'hrc_any_n6.cfg']\n for cfgfile in cfg_list:\n cfgfile = os.path.join(cfg_path, cfgfile)\n cfg2json(cfgfile)\n\n cfg_path = os.path.realpath(__file__).replace(\"devutils/pars_utils.py\", \"pars/\")\n out_path = os.path.realpath(__file__).replace(\"devutils/pars_utils.py\", \"pars/hap_pars/any/\")\n cfg_list = [\"astrodrizzle_filter_hap.cfg\", \"astrodrizzle_single_hap.cfg\", \"astrodrizzle_total_hap.cfg\"]\n for cfgfile in cfg_list:\n cfgfile = os.path.join(cfg_path, cfgfile)\n cfg2json(cfgfile, outpath=out_path)", "def extract_json_to_files(input_dir,output_dir):\n files={}\n files['train']='train-v1.1.json'\n files['dev']='dev-v1.1.json'\n\n for file in files:\n 
filename=os.path.join(input_dir,files[file])\n with open(filename,'r',encoding='utf-8') as data_file:\n examples = []\n dataset=json.load(data_file)\n count_total=total_exs(dataset)\n count_mapping_problem=0\n count_token_problem=0\n count_ansspan_problem=0\n count_examples=0\n for article_id in tqdm(range(len(dataset['data'])), desc=\"Preprocessing {}\".format(file)):\n article_paragraph=dataset['data'][article_id]['paragraphs']\n for paragraph_id in range(len(article_paragraph)):\n context=article_paragraph[paragraph_id]['context']\n context=context.replace(\"''\",'\"').replace(\"``\",'\"')\n context = context.replace('\\u3000', ' ').replace('\\u202f',' ').replace('\\u2009', ' ')#.replace(\"'\",\"'\")\n context=context.replace('\\-',' ')\n context_tokens=tokenize_sequence(context)\n context=context.lower()\n qas=article_paragraph[paragraph_id]['qas']\n charloc2wordloc=get_char_word_loc_mapping(context, context_tokens)\n if charloc2wordloc is None:\n count_mapping_problem+=len(qas)\n continue\n for qa in qas:\n question=qa['question'].lower()\n question_tokens=tokenize_sequence(question)\n\n ans_text=qa['answers'][0]['text'].lower()\n ans_text=ans_text.replace('\\u3000', ' ').replace('\\u202f', ' ').replace('\\u2009', ' ')\n ans_start_loc=qa['answers'][0]['answer_start']\n if qa['id'] in ['5706baed2eaba6190074aca5','57269c73708984140094cbb5','57269c73708984140094cbb7','572a11661d04691400779721','572a11661d04691400779722','572a11661d04691400779723','572a11661d04691400779724','572a11661d04691400779725','572a2cfc1d0469140077981b','572a3a453f37b319004787e9','572a84d3f75d5e190021fb3c']:\n ans_start_loc+=1\n if qa['id'] in ['572a5df77a1753140016aedf','572a5df77a1753140016aee0','572a84d3f75d5e190021fb38','572a84d3f75d5e190021fb39','572a84d3f75d5e190021fb3a','572a84d3f75d5e190021fb3b','572a85df111d821400f38bad','572a85df111d821400f38bae','572a85df111d821400f38baf','572a85df111d821400f38bb0']:\n ans_start_loc+=2\n if qa['id'] in ['572a5df77a1753140016aee1','572a5df77a1753140016aee2']:\n ans_start_loc+=3\n if qa['id'] in ['57286bf84b864d19001649d6','57286bf84b864d19001649d5']:\n ans_start_loc-=1\n if qa['id'] in ['5726bee5f1498d1400e8e9f3','5726bee5f1498d1400e8e9f4']:\n ans_start_loc-=2\n ans_end_loc=ans_start_loc+len(ans_text)\n\n if context[ans_start_loc:ans_end_loc]!=ans_text:\n count_ansspan_problem+=1\n continue\n ans_start_wordloc = charloc2wordloc[ans_start_loc][1] # answer start word loc\n ans_end_wordloc = charloc2wordloc[ans_end_loc-1][1] # answer end word loc\n assert ans_start_wordloc <= ans_end_wordloc\n\n ans_tokens = context_tokens[ans_start_wordloc:ans_end_wordloc + 1]\n if \"\".join(ans_tokens) != \"\".join(ans_text.split()):\n count_token_problem += 1\n #print(ans_text)\n #print(ans_tokens)\n continue # skip this question/answer pair\n examples.append((' '.join(context_tokens),' '.join(question_tokens),' '.join(ans_tokens),' '.join([str(ans_start_wordloc),str(ans_end_wordloc)])))\n print(\"Number of (context, question, answer) triples discarded due to char -> token mapping problems: \", count_mapping_problem)\n print(\"Number of (context, question, answer) triples discarded because character-based answer span is unaligned with tokenization: \",count_token_problem)\n print(\"Number of (context, question, answer) triples discarded due character span alignment problems (usually Unicode problems): \",count_ansspan_problem)\n print(\"Processed %i examples of total %i\\n\" % (len(examples), len(examples)+count_mapping_problem+count_token_problem+count_ansspan_problem))\n indices = 
list(range(len(examples)))\n np.random.shuffle(indices)\n with open(os.path.join(output_dir,file+'.context'),'w',encoding='utf-8') as context_file, \\\n open(os.path.join(output_dir,file+'.question'),'w',encoding='utf-8') as question_file, \\\n open(os.path.join(output_dir,file+'.answer'),'w',encoding='utf-8') as answer_file, \\\n open(os.path.join(output_dir,file+'.span'),'w',encoding='utf-8') as span_file:\n for i in indices:\n (context,question,answer,span)=examples[i]\n context_file.write(context+'\\n')\n question_file.write(question+'\\n')\n answer_file.write(answer+'\\n')\n span_file.write(span+'\\n')", "def lines_():\n query = f\"\"\"\nSELECT script_l, `name`, episode\nFROM script\nINNER JOIN characters\nON characters.char_id = script.characters_char_id\nINNER JOIN episodes\nON episodes.ep_id = script.episodes_ep_id\n\"\"\"\n data = pd.read_sql_query(query, engine)\n return data.to_json(orient=\"records\")", "def result_writer(result_poly):\n val = {}\n val[\"type\"] = \"FeatureCollection\"\n val[\"features\"] = result_poly\n with open(output_file_path, 'w') as outfile:\n json.dump(val, outfile, indent=3)\n outfile.close()", "def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")", "def make_df_from_json(json_files, out_file):\n table = [[\"name\", \n \"cik\", \n \"city\",\n \"state\",\n \"street1\",\n \"street2\",\n \"zip_code\",\n \"year_of_incorp\", \n \"min_inv\", \n \"tot_off\", \n \"tot_sold\", \n \"tot_rem\", \n \"ind_group_type\", \n \"has_non_accred\", \n \"num_non_accred\", \n \"tot_num_inv\"\n ]] \n\n for json_dict in json_files:\n\n with open(json_dict, \"rb\") as f:\n data = json.load(f)\n print(json_dict)\n\n for i, key in enumerate(data):\n # if i % 1000 == 0:\n # print(i)\n entry = data[key] \n if entry == {}:\n #print(\"missing entry {0}\".format(i))\n continue\n row = []\n\n primary_issuer = entry[\"Primary Issuer\"]\n cik = primary_issuer[\"cik\"]\n name = primary_issuer[\"entity_name\"]\n phone = primary_issuer[\"phone\"]\n year_of_incorp = primary_issuer[\"year_of_incorp\"]\n address = primary_issuer[\"address\"]\n city = address[\"city\"]\n state = address[\"state\"]\n street1 = address[\"street1\"]\n street2 = address[\"street2\"]\n zip_code = address[\"zip_code\"]\n\n secondary_issuers = entry[\"Secondary Issuers\"]\n related_people = entry[\"Related People\"]\n \n offering_data = entry[\"Offering Data\"]\n min_inv = offering_data[\"min_investment_accepted\"]\n tot_off = offering_data[\"total_offering_amount\"]\n tot_sold = offering_data[\"total_amount_sold\"]\n tot_rem = offering_data[\"total_remaining\"]\n ind_group_type = offering_data[\"ind_group_type\"]\n has_non_accred = offering_data[\"has_non_accred\"]\n num_non_accred = offering_data[\"num_non_accred\"]\n tot_num_inv = offering_data[\"tot_num_inv\"] \n\n row = [name, \n cik, \n city,\n state,\n street1,\n street2,\n zip_code,\n year_of_incorp,\n min_inv,\n tot_off,\n tot_sold,\n tot_rem,\n ind_group_type,\n has_non_accred,\n num_non_accred,\n tot_num_inv\n ]\n\n table.append(row)\n\n df = pd.DataFrame(table)\n df.to_csv(out_file)\n\n return 0", "def main(input_filepath, output_filepath):\n # return processed data and save in the output files\n 
in_data_y, y_output, in_data = make_data_set(input_filepath)\n in_data_y.to_csv(output_filepath)\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n return in_data_y, y_output, in_data", "def generate_data(filename_in, filename_out):\n file_in = open(filename_in, 'r')\n file_out = open(filename_out, 'w+')\n\n df = pd.read_csv(file_in, header=None, sep=' ', quoting=csv.QUOTE_NONE)\n x = df.iloc[:, 0].values\n y_class = df.iloc[:, -1].values\n file_in.close()\n\n y_class = np.where(y_class == 'O', 0, 1)\n\n x_features = []\n size_x = len(x)\n for i in range(3, size_x):\n if i % 5000 == 0:\n print(i, \"/\", size_x)\n x_features.append(features(x[i-2], x[i-1], x[i], y_class[i]))\n\n df_write = pd.DataFrame(x_features)\n\n tab = [x for x in range(1, NUMBER_OF_FEATURE + 2)]\n df_write.columns = tab\n write_csv(df_write, file_out)\n file_out.close()", "def MongoStockTable(inputfilename):\n input=open(inputfilename)\n data_string=json.load(input)\n jsonOutput=\"{ \\\"portfolio\\\":[\"\n for portfolio in data_string[\"portfolio\"]:\n if portfolio[\"display\"] == \"yes\":\n jsonOutput+=\"{\\\"portfolioname\\\":\\\"\" + portfolio[\"portfolioName\"]+\"\\\", \\\"portfolioStocks\\\":[\"\n cumulative=Accumulator.Accumulator()\n for data in portfolio[\"portfolioStocks\"]:\n stock=Stock(data)\n cumulative.Add(stock.totalpurchaseprice, stock.commission_to_buy, stock.dollarGain,stock.dailyChange_func() ,stock.currentWorth_func() )\n jsonOutput+=stock.JSON()+\",\" \n jsonOutput=jsonOutput.rstrip(',') # remove that trailing extraneous , [:-1]\n jsonOutput+=\"],\"\n jsonOutput+=\"\\n\" \n jsonOutput+=\"\\\"cumulative Result\\\":\"+cumulative.JSONify()+\"},\" \n jsonOutput=jsonOutput.rstrip(',') \n jsonOutput+=\"] }\"\n MongoSave(ast.literal_eval(jsonOutput))\n input.close()", "def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))", "def encode_csv(self,\n is_ensemble_data=True,\n is_ancillary_data=True,\n is_amplitude=True,\n is_correlation=True,\n is_beam_velocity=True,\n is_instrument_velocity=True,\n is_earth_velocity=True,\n is_good_beam=True,\n is_good_earth=True,\n is_bottom_track=True,\n is_range_tracking=True,\n is_nmea_data=True,\n is_system_setup=True):\n result = []\n\n dt = datetime.datetime.now()\n blank = 0\n bin_size = 0\n if self.IsAncillaryData:\n blank = self.AncillaryData.FirstBinRange\n bin_size = self.AncillaryData.BinSize\n\n # Get the subsystem code and config\n ss_code = \"\"\n ss_config = \"\"\n if self.IsEnsembleData and is_ensemble_data:\n ss_code = self.EnsembleData.SysFirmwareSubsystemCode\n ss_config = self.EnsembleData.SubsystemConfig\n\n # Create a new datetime based off ensemble date and time\n dt = self.EnsembleData.datetime()\n\n result += self.EnsembleData.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n\n if self.IsAncillaryData and is_ancillary_data:\n result += self.AncillaryData.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsAmplitude and is_amplitude:\n result += self.Amplitude.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsCorrelation and is_correlation:\n result += self.Correlation.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsBeamVelocity and is_beam_velocity:\n result += self.BeamVelocity.encode_csv(dt, ss_code, ss_config, blank, 
bin_size)\n if self.IsInstrumentVelocity and is_instrument_velocity:\n result += self.InstrumentVelocity.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsEarthVelocity and is_earth_velocity:\n result += self.EarthVelocity.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsGoodBeam and is_good_beam:\n result += self.GoodBeam.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsGoodEarth and is_good_earth:\n result += self.GoodEarth.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsBottomTrack and is_bottom_track:\n result += self.BottomTrack.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsRangeTracking and is_range_tracking:\n result += self.RangeTracking.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsNmeaData and is_nmea_data:\n result += self.NmeaData.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n if self.IsSystemSetup and is_system_setup:\n result += self.SystemSetup.encode_csv(dt, ss_code, ss_config, blank, bin_size)\n\n return result", "def cat_json(output_filename, input_filenames):\n\twith open(output_filename, \"w\") as outfile:\n\t\tfirst = True\n\t\tcounter = -1\n\t\tfor infile_name in input_filenames:\n\t\t\twith open(infile_name) as infile:\n\t\t\t\tif first:\n\t\t\t\t\toutfile.write('{')\n\t\t\t\t\tfirst = False\n\t\t\t\telse:\n\t\t\t\t\toutfile.write(',')\n\t\t\t\toutfile.write(mangle(infile.read(), counter))\n\t\t\t\tcounter -= 1\n\t\toutfile.write('}')", "def writeCSV():\n final_list = get_final_list()\n path_to_csv_File = 'system_metrics.csv'\n\n csv_file = open(path_to_csv_File, 'w+', newline='', encoding=\"utf8\")\n csv_file_writer = csv.writer(csv_file, delimiter=',')\n\n csv_file_writer.writerow(['Subscription', 'Resource', 'MetricType',\n 'Timestamp', 'Unit', 'Minimum', 'Maximum', 'Average'])\n\n for item in final_list:\n csv_file_writer.writerow([item['subscription'], item['resource'], item['metricType'], item['timestamp'],\n item['unit'], item['minimum'], item['maximum'], item['average']])\n\n print('Output written successfully!!')", "def csv_to_json(file_path: Path) -> dict:\n\n output = {}\n\n with open(file_path, \"r\", ) as f:\n columns = get_columns_from_csv(file_path)\n reader = csv.DictReader(f, fieldnames=columns)\n next(reader, None)\n for row in reader:\n powerplan = row[\"DESCRIPTION\"]\n\n if powerplan not in output and powerplan:\n output[powerplan] = {\n k: v\n for k, v in row.items()\n if not k.startswith(\"PHASE\") and not k.startswith(\"DOT\")\n }\n\n output[powerplan][\"phases\"] = {}\n\n phase = row[\"PHASE_DESCRIPTION\"]\n\n if phase not in output[powerplan][\"phases\"] and phase:\n output[powerplan][\"phases\"][phase] = {\n k: v for k, v in row.items() if k.startswith(\"PHASE\")\n }\n\n output[powerplan][\"phases\"][phase][\"dots\"] = {}\n\n dot = row[\"DOT_DESCRIPTION\"]\n\n if phase:\n if dot not in output[powerplan][\"phases\"][phase][\"dots\"] and dot:\n output[powerplan][\"phases\"][phase][\"dots\"][dot] = {\n k: v for k, v in row.items() if k.startswith(\"DOT\")\n }\n return output", "def read_csv():", "def generate_qps_json(input_folder, output_file):\n dic = get_qps_as_dict(input_folder)\n with open(output_file, \"w\") as f:\n f.write(json.dumps(dic))", "def save_subsample(dataset, path=os.path.join('data', 'yelp_reviews.json.gz')):\n\n df = pd.DataFrame(dataset)\n df.to_json(path, orient='records', compression='gzip', lines=True)", "def save_as_json(self,json_path):\n data = {}\n for company in self:\n\n df = company.data.copy()\n df.index = 
df.index.map(str)\n data[company.ticker] = json.loads(df.to_json())\n\n with open(json_path, 'w') as file:\n json.dump(data, file,indent = 4,sort_keys = True)", "def dump_csv():\n df = helper.load_dataframe('asintosku').reset_index()\n df['min'] = None\n df['max'] = None\n df.asin = df.asin + np.where(\n df.isprime == 0, '_seller', '_prime')\n del df['isprime']\n dfold = load_csv()\n merged = dfold.append(df, ignore_index=True, sort=True).sort_values(\n 'min', ascending=False).drop_duplicates(['seller_sku'])\n merged[['asin', 'mean', 'min', 'max', 'seller_sku']].to_csv(\n datafolder+filename, index=False)", "def summarise_data(trip_in, station_data, trip_out):\n # generate dictionary of station - city mapping\n station_map = create_station_mapping(station_data)\n \n with open(trip_out, 'w') as f_out:\n # set up csv writer object \n out_colnames = ['duration', 'start_date', 'start_year',\n 'start_month', 'start_hour', 'weekday',\n 'start_city', 'end_city', 'subscription_type'] \n trip_writer = csv.DictWriter(f_out, fieldnames = out_colnames)\n trip_writer.writeheader()\n \n for data_file in trip_in:\n with open(data_file, 'r') as f_in:\n # set up csv reader object\n trip_reader = csv.DictReader(f_in)\n\n # collect data from and process each row\n for row in trip_reader:\n new_point = {}\n \n # convert duration units from seconds to minutes\n ### Question 3a: Add a mathematical operation below ###\n ### to convert durations from seconds to minutes. ###\n new_point['duration'] = float(row['Duration'])/60\n \n # reformat datestrings into multiple columns\n ### Question 3b: Fill in the blanks below to generate ###\n ### the expected time values. ###\n trip_date = datetime.strptime(row['Start Date'], '%m/%d/%Y %H:%M')\n new_point['start_date'] = trip_date.strftime('%Y-%m-%d')\n new_point['start_year'] = trip_date.strftime('%Y') # or : trip_date.year\n new_point['start_month'] = trip_date.strftime('%m') # or : trip_date.month\n new_point['start_hour'] = trip_date.strftime('%H') # or : trip_date.hour\n new_point['weekday'] = trip_date.strftime('%a') # or : trip_date.weekday() OR trip_date.isoweekday()\n\n \n # remap start and end terminal with start and end city\n new_point['start_city'] = station_map[row['Start Terminal']]\n new_point['end_city'] = station_map[row['End Terminal']]\n # two different column names for subscribers depending on file\n if 'Subscription Type' in row:\n new_point['subscription_type'] = row['Subscription Type']\n else:\n new_point['subscription_type'] = row['Subscriber Type']\n\n # write the processed information to the output file.\n trip_writer.writerow(new_point)", "def save_as_csv(time_series, data, path_and_file_name):\n\n parent_name = \"test\"\n parent_uqid = uuid.uuid4()\n\n file_obj = open(path_and_file_name, 'w')\n file_obj.write('version,'+str(2)+'\\n')\n file_obj.write('numOfCH,'+str(1)+'\\n')\n file_obj.write('type, scan\\n')\n file_obj.write('ch_type,'+str(0)+'\\n')\n\n file_obj.write('carpet pos,'+str(0)+'\\n')\n file_obj.write('parent_name,'+str(parent_name)+'\\n')\n file_obj.write('parent_uqid,'+str(parent_uqid)+'\\n')\n file_obj.write('parent_filename,'+str(path_and_file_name)+'\\n')\n\n file_obj.write('pc, 0\\n')\n file_obj.write('Time (ns), CH0 Auto-Correlation\\n')\n for time_step in range(0, time_series.shape[0]):\n file_obj.write(str(float(time_series[time_step]))+','+str(data[time_step])+ '\\n')\n file_obj.write('end\\n')\n\n file_obj.close()", "def get_datalist_fr_json(self):\n raw_data = json.load(open(self.saved_json_file, 'r'))\n for 
indivdual_set in raw_data['query']['results']['stats']:\n temp_dict_data = {}\n if type(indivdual_set) == str:\n #for single data\n continue # temp do not use\n for parameters in indivdual_set.keys():\n if type(indivdual_set[parameters]) == str:\n temp_dict_data[parameters] = indivdual_set[parameters]#for symbol\n elif type(indivdual_set[parameters]) == dict:\n if indivdual_set[parameters].has_key('content'):\n temp_dict_data[parameters] = indivdual_set[parameters]['content']\n\n ## append to list\n self.com_data_allstock_list.append(temp_dict_data)", "def convert_to_json(self, rows):\n\t\tjson_list = []\n\t\tfor row in rows:\n\t\t\tjson_record = {}\n\t\t\tjson_record[\"movie_id\"] = row[0]\n\t\t\tjson_record[\"title\"] = change_title(row[1])\n\t\t\tjson_record[\"genres\"] = row[2][:5]\n\t\t\tjson_record[\"imdb_id\"] = row[3]\n\t\t\tjson_record[\"tmdb_id\"] = row[4]\n\t\t\tjson_record[\"rating\"] = row[5]\n\t\t\tjson_record[\"number_of_ratings\"] = row[6]\n\t\t\tjson_record[\"weighted_rating\"] = row[7]\n\t\t\tjson_record[\"release_year\"] = row[8]\n\t\t\tjson_record[\"img_path\"] = row[9]\n\t\t\tjson_record[\"description\"] = row[10]\n\t\t\tjson_record[\"director\"] = row[11]\n\t\t\tjson_record[\"length\"] = row[12]\n\t\t\tjson_list.append(json_record)\n\t\treturn json.dumps(json_list, indent = 4)", "def test_parse(self, tmpdir):\n json_file = tmpdir.join(\"f.json\")\n obj = {\"ds\": [{\"file\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}]}\n with open(str(json_file), \"w\") as f:\n json.dump(obj, f)\n\n csv_file = tmpdir.join(\"f.csv\")\n csv_file.write(\"\\n\".join([\n \",\".join(HEADER_ROW),\n \"ds,1,url,title,yes,no,{}\".format(str(json_file))\n ]))\n\n expected = {\n \"ds\": {\n \"generate_aggregation\": True,\n \"include_in_wms\": False,\n \"tech_note_title\": \"title\",\n \"tech_note_url\": \"url\",\n \"files\": [\n {\"path\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}\n ]\n }\n }\n\n s = StringIO()\n sys.stdout = s\n parse_file(str(csv_file))\n sys.stdout = sys.__stdout__\n\n output_json = s.getvalue()\n try:\n parsed = json.loads(output_json)\n except ValueError:\n assert False, \"parse_file() produced invalid JSON\"\n\n assert parsed == expected", "def write_to_csv(path,data_dict):\n\n\n schema = [\"file_name\",\"family\",\"genus\",\"genus_confidence\",\n \"species_1\",\"confidence_1\",\"hall_1\",\n \"species_2\",\"confidence_2\",\"hall_2\",\n \"species_3\",\"confidence_3\",\"hall_3\",\n \"species_4\",\"confidence_4\",\"hall_4\",\"peaks\"]\n\n # if no file exists create a one and inform the user\n if not os.path.exists(path):\n print(\"creating new output file {}\".format(path))\n with open(path, \"w\") as csv_file:\n filewriter = csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(schema)\n\n row = []\n\n row.append(data_dict[\"file_name\"])\n row.append(data_dict[\"family\"])\n \n row.append(data_dict[\"genus_1\"])\n row.append(data_dict[\"genus_confidence_1\"][:5])\n \n row.append(data_dict[\"species_1\"])\n row.append(data_dict[\"confidence_1\"][:5])\n row.append(data_dict[\"hall_1\"])\n \n row.append(data_dict[\"species_2\"])\n row.append(data_dict[\"confidence_2\"][:5])\n row.append(data_dict[\"hall_2\"])\n\n row.append(data_dict[\"species_3\"])\n row.append(data_dict[\"confidence_3\"][:5])\n row.append(data_dict[\"hall_3\"])\n\n row.append(data_dict[\"species_4\"])\n row.append(data_dict[\"confidence_4\"][:5])\n row.append(data_dict[\"hall_4\"])\n\n row.append(data_dict[\"peaks\"])\n \n with open(path, \"a\") as csv_file:\n filewriter = 
csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(row)", "def save_model_output2csv(RunSet='FP-MOYA-Nest', res='0.25x0.3125',\n folder='./'):\n import seaborn as sns\n # Which flights to plot?\n if (RunSet == 'FP-MOYA-Nest') and (res == '0.25x0.3125'):\n # Local settings/variables\n flight_IDs = ['C006', 'C007']\n sdate_d = {\n 'C006': datetime.datetime(2017, 3, 1),\n 'C007': datetime.datetime(2017, 3, 2),\n }\n # Loop by flight and retrieve the files as dataframes\n dfs_mod = {}\n for flight_ID in flight_IDs:\n # Get data\n sdate = sdate_d[flight_ID]\n dfs_mod_GC = get_GEOSChem4flightnum(flight_ID=flight_ID,\n res=res,\n RunSet=RunSet,\n sdate=sdate,\n )\n # Save to csv\n df = dfs_mod_GC[list(dfs_mod_GC.keys())[0]]\n filename_str = 'GC_planeflight_data_{}_{}'\n filename = filename_str.format(RunSet, flight_ID)\n# filename = AC.rm_spaces_and_chars_from_str(filename)\n df.to_csv(os.path.join(folder+filename+'.csv'))\n\n elif (res == '0.25x0.3125') and (RunSet == 'FP-Nest'):\n flight_nums = [\n # 217,\n 218, 219, 220, 221, 222, 223, 224, 225,\n ]\n flight_IDs = ['C{}'.format(i) for i in flight_nums]\n # - Loop by flight and retrieve the files as dataframes (mod + obs)\n # Model\n dfs_mod_GC = {}\n for flight_ID in flight_IDs:\n dfs = get_GEOSChem4flightnum(flight_ID=flight_ID, res=res,\n RunSet=RunSet,)\n df = dfs[RunSet]\n # Add the derived variables to the dataframe\n df = add_deriv_vars2df(df=df)\n# dfs_mod[flight_ID] = df\n # Save to csv\n# df = dfs_mod_GC[ list(dfs_mod_GC.keys())[0] ]\n filename_str = 'GC_planeflight_data_{}_{}'\n filename = filename_str.format(RunSet, flight_ID)\n# filename = AC.rm_spaces_and_chars_from_str(filename)\n df.to_csv(os.path.join(folder+filename+'.csv'))", "def _write_json(\n output_path, records\n):\n output_path.write_text(json.dumps(records))", "def create_json_report(output):\n # Initial work, just dump mia_metrics and dummy_metrics into a json structure\n return json.dumps(output, cls=NumpyArrayEncoder)", "def build():\n for root, dirs, files in os.walk(IN_PATH):\n for filename in files:\n if filename.endswith('.csv'):\n with open(os.path.join(IN_PATH, filename), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n path = row[0].split('.')\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = row[1]\n else:\n d = d[path[i]]\n with open (os.path.join(OUT_PATH, filename.replace('.csv', '.json')), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open (os.path.join(WEB_PATH, filename.replace('.csv', '.js')), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))\n\n with open(os.path.join(IN_PATH, 'en_US.csv'), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n path = row[0].split('.')\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = zz_string(row[1], row[0])\n else:\n d = d[path[i]]\n with open(os.path.join(OUT_PATH, 'zz_ZZ.json'), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open(os.path.join(WEB_PATH, 'zz_ZZ.js'), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = 
{};'.format(json.dumps(web_data)))", "def save_json(df):\n dict = {}\n for row in df.iterrows():\n dict[row[1]['Country']] = {'Region' : row[1]['Region'],\n 'Pop. Density (per sq. mi.)' : row[1]['Pop. Density (per sq. mi.)'],\n 'Infant mortality (per 1000 births)' : row[1]['Infant mortality (per 1000 births)'],\n 'GDP ($ per capita) dollars' : row[1]['GDP ($ per capita) dollars']\n }\n\n with open('data.json', 'w', encoding='utf8') as outfile:\n data = json.dumps(dict, indent=4, sort_keys=False, separators=(',', ': '), ensure_ascii=False)\n outfile.write(data)", "def write_csv(data, output_csv):\n with open(output_csv, 'w') as csvfile:\n fieldnames = ['minute_start',\n 'total_requests',\n 'success_count',\n 'error_count',\n 'mean_respone_time',\n 'data_sent_mb']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for row in data:\n row['minute_start'] = row['minute_start'].isoformat()\n writer.writerow(row)", "def postprocess(self, inference_output):\n ret = []\n quantiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n # for each request\n for inference_output_request in inference_output:\n ret_request = []\n # for each time series\n for i in inference_output_request:\n l = {}\n l[\"item_id\"] = i.item_id\n l[\"quantiles\"] = {}\n for q in quantiles:\n l[\"quantiles\"][str(q)] = i.quantile(q).tolist()\n l[\"mean\"] = i.mean.tolist()\n ret_request.append(json.dumps(l))\n ret.append('\\n'.join(ret_request) + '\\n')\n return ret", "def get_json(ticker_symbol):\n s_data = Stock.objects.filter(ticker=ticker_symbol).order_by('-date')\n json_file = []\n for data in s_data:\n json_obj = StockSerializer(data).data\n json_file.append(json_obj)\n return json_file", "def create_csv(output_file, y, tx, ids, header, is_test):\n print('\\nCreate new csv file named ' + str(output_file) + '...')\n with open(output_file, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)\n writer.writeheader()\n for idx, y_row, tx_row in zip(ids, y, tx):\n if is_test:\n prediction = '?'\n else:\n prediction = 'b' if y_row == -1 else 's'\n dictionary = {'Id': int(idx),'Prediction': prediction}\n for index in range(len(tx_row)):\n dictionary[header[index + 2]] = float(tx_row[index])\n writer.writerow(dictionary)\n print('\\n... 
finished.')", "def post_process_output_file():\n parsed_data = []\n unparseable_data = []\n\n with open('../output/part-00000', 'r') as input_file:\n for line in input_file:\n line = line.strip()\n try:\n csv_splits = line.split(',')\n csv_splits[0] = int(csv_splits[0])\n # parsed_data is a list of lists\n parsed_data.append(csv_splits)\n except ValueError:\n unparseable_data.append(line)\n parsed_data.sort()\n\n with open('../output/titanic_test_data.csv', 'w') as output_file:\n # start with lines that couldn't be parsed\n # hopefully this will only be the original header\n for line in unparseable_data:\n output_file.write(\"%s\\n\" % line)\n for line in parsed_data:\n output_file.write(\"%d,%s\\n\" % (line[0], line[1]))", "def write_coco_json(filepath, dataset_dicts, name_to_id, **kwargs):\n info = {\n \"description\": kwargs.get(\"description\", \"\"),\n \"url\": kwargs.get(\"url\", \"\"),\n \"version\": kwargs.get(\"version\", \"0.0\"),\n \"year\": kwargs.get(\"year\", \"2017\"),\n \"contributor\": kwargs.get(\"contributor\", \"\"),\n \"date_created\": kwargs.get(\"date_created\", \"2017/01/01\"),\n }\n\n licenses = {\n \"url\": \"closed\",\n \"id\": 0,\n \"name\": \"closed\",\n }\n\n images, annotations = [], []\n annotation_id = 1\n for record in dataset_dicts:\n images.append({\n \"id\": record[\"image_id\"],\n \"width\": record[\"width\"],\n \"height\": record[\"height\"],\n \"file_name\": record[\"file_name\"]\n })\n\n for annotation in record[\"annotations\"]:\n x0, y0, x1, y1 = annotation[\"bbox\"]\n annotations.append({\n \"id\": annotation_id,\n \"category_id\": annotation[\"category_id\"],\n \"bbox\": [x0, y0, x1 - x0, y1 - y0],\n \"iscrowd\": annotation[\"iscrowd\"],\n \"image_id\": record[\"image_id\"],\n \"area\": (x1 - x0) * (y1 - y0),\n })\n annotation_id += 1\n\n categories = [{\n \"id\": category_id,\n \"name\": \"{}\".format(category_name),\n \"supercategory\": \"\"\n } for category_name, category_id in name_to_id.items()]\n\n coco_dict = {\n \"info\": info,\n \"licenses\": licenses,\n \"images\": images,\n \"annotations\": annotations,\n \"categories\": categories,\n }\n\n with filepath.open(mode=\"w\") as file_handle:\n json.dump(coco_dict, file_handle)", "def op_to_json(self, op, out_dir=r'./output_files/'):\n\n #generate export filename and export Path obj\n ts = str(datetime.now())[:-7]\n ts = ts.replace(':','').replace('-','').replace(' ','_')\n ms = self.map_size\n filename = f\"{self.name}_{ms[0]}x{ms[1]}_{ts}.json\"\n export_path = Path(out_dir).joinpath(filename)\n\n export_obj = deepcopy(SHMEPPY_JSON)\n export_obj[\"operations\"].append(op.__dict__)\n\n try:\n result_str = f\"Exporting mapfile: {str(export_path)}\\n\"\n with export_path.open(mode='w') as json_file:\n json.dump(export_obj, json_file)\n except Exception as e:\n result_str = f\"Error: {str(e)}, unable to export.\\n\"\n\n return result_str", "def export2csv(data, input_filename, output_filename):\n assert input_filename\n assert os.path.exists(output_filename) and os.path.isfile(output_filename)\n str2write_xyz = input_filename + \",\"\n for i in data:\n for j in i:\n str2write_xyz += str(j) + \",\"\n filen = input_filename.split(\".\")[0]\n str2write_xyz += filen[filen.find(os.sep) + 1:].replace(\"_\", \",\").replace(\"-\", \".\").replace(\"/\", \",\").replace(\"\\\\\", \",\") + \"\\n\"\n with open(output_filename, \"a\") as filep:\n filep.write(str2write_xyz)", "def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n 
meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))", "def process_song_data(spark, input_data, output_data):\n # get filepath to song data file\n song_data = input_data + 'song_data/*/*/*/*.json'\n \n # read song data file\n df = spark.read.json(song_data)\n \n # create view for songs table\n df.createOrReplaceTempView(\"songs\") \n \n \n # extract columns to create songs table. 
Adding Distinct and Not null to song_id as it is the primary key\n songs_table = spark.sql(\"\"\"\n SELECT DISTINCT song_id, \n title,\n artist_id,\n year,\n duration\n FROM songs\n WHERE song_id IS NOT NULL\n \"\"\")\n \n # write songs table to parquet files partitioned by year and artist\n songs_table.write.mode('overwrite').partitionBy(\"year\", \"artist_id\").parquet(output_data+'songs_table/')\n\n # create view for artists table\n df.createOrReplaceTempView(\"artists\") \n \n # extract columns to create artists table, Adding Distinct and Not null to artist_id as it is the primary key\n artists_table = spark.sql(\"\"\"\n SELECT DISTINCT artist_id, \n artist_name,\n artist_location,\n artist_latitude,\n artist_longitude\n FROM artists\n WHERE artist_id IS NOT NULL\n \"\"\")\n \n # write artists table to parquet files\n artists_table.write.mode('overwrite').parquet(output_data+'artists_table/')", "def export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')", "def to_csv_file_obj(self, rows):\n output = StringIO.StringIO()\n writer = csv.writer(output)\n writer.writerows(rows)\n return output", "def process_song_data(spark, input_data, output_data, mode=\"overwrite\"):\n # get filepath to song data file\n song_data = input_data + \"song_data/*/*/*/*.json\"\n\n # read song data file\n print(\"reading song logs from {}\".format(song_data))\n df = spark.read.json(song_data)\n\n # extract columns to create songs table\n song_fields = ['song_id', 'title', 'artist_id', 'year', 'duration']\n songs_table = df.select(song_fields)\n songs_table = songs_table.withColumn('year', F.col('year').cast(IntegerType()))\n songs_table = songs_table.withColumn('duration', F.col('duration').cast(DoubleType()))\n\n # write songs table to parquet files partitioned by year and artist\n song_path = output_data + 'star_schema/song_table/'\n print(\"Writing Song Table to {}\".format(song_path))\n songs_table.write \\\n .mode(mode) \\\n .partitionBy('year', 'artist_id') \\\n .parquet(song_path)\n\n # extract columns to create artists table\n artist_fields = [\n 'artist_id', 'artist_name',\n 'artist_location', 'artist_latitude', 'artist_longitude'\n ]\n artists_table = df.select(artist_fields)\n artists_table = artists_table.withColumnRenamed(\n 'artist_name', 'name'\n )\n artists_table = artists_table.withColumnRenamed(\n 'artist_location', 'location'\n )\n artists_table = artists_table.withColumn(\n 'latitude',\n F.col('artist_latitude').cast(DoubleType())\n )\n artists_table = artists_table.withColumn(\n 'longitude',\n F.col('artist_longitude').cast(DoubleType())\n )\n artist_col_names = [\n 'artist_id', 'name', 'location', 'latitude', 'longitude'\n ]\n artists_table = artists_table.select(artist_col_names)\n\n # write artists table to parquet files\n artists_path = output_data + 'star_schema/artist_table'\n print(\"Writing Artist Table to {}\".format(artists_path))\n artists_table.write \\\n .mode(mode) \\\n .partitionBy('artist_id') \\\n .parquet(artists_path)", "def prepare_data(filename='data/DOT_timeSeries.csv'):\n\n # read data file into pandas dataframe\n df = pd.read_csv(filename)\n\n # extract unwanted 'countries' from dataframe\n countries = 
['Europe', 'Emerging and Developing Europe', 'Emerging and Developing Asia',\n 'Middle East, North Africa, and Pakistan', 'Export earnings: nonfuel',\n 'Sub-Saharan Africa', 'Export earnings: fuel', 'Western Hemisphere',\n 'World', 'Special Categories', 'Advanced Economies', 'CIS',\n 'Emerging and Developing Economies']\n for country in countries:\n df = extract_relevant_rows(df, column_name='Country Name', column_value=country, not_equal=True)\n df = extract_relevant_rows(df, column_name='Counterpart Country Name', column_value=country, not_equal=True)\n\n # extract exports only from data\n exports = extract_relevant_rows(df, column_name='Indicator Code', column_value='TXG_FOB_USD')\n # extract value attributes only from exports\n export_values = extract_relevant_rows(exports, column_name='Attribute', column_value='Value')\n\n return export_values", "def writecsv(obsid, date, output='obslog.csv', rawpath=None):\n logger = log.getLogger('obslog.writecsv')\n progid = obsid[:obsid.rfind('-')]\n logger.debug('Program ID: %s', progid)\n obslog = date + '_' + progid + '_obslog.txt'\n if rawpath:\n obslog = rawpath + '/' + obslog\n data = readtxt(obslog)\n\n output_data = {}\n first_spectrum = None\n files = sorted(data.keys(), reverse=True) # Note the reverse sort here\n for f in files: # Go through the whole list in case there were interruptions for re-aqcuisitions\n if data[f]['Observation ID'] == obsid:\n output_data[f] = data[f]\n logger.debug('Including %s', f)\n first_spectrum = f\n logger.debug('First spectrum: %s', first_spectrum)\n\n last_acq = None\n for i in range(files.index(first_spectrum)+1, len(files)): # again, files is reverse sorted\n if data[files[i]]['ACQ'] == 'Y':\n last_acq = files[i]\n logger.info('Last acqusition file: %s', last_acq)\n break\n\n # Get the header info for the requested images plus the last acquisition image:\n if rawpath:\n fitsfiles = [rawpath + '/' + f for f in ([last_acq] + sorted(output_data.keys()))]\n else:\n fitsfiles = [last_acq] + sorted(output_data.keys())\n headerinfo = header.info(fitsfiles)\n\n for f in output_data.keys(): # Add new keys for the absolute P and Q offsets:\n headerinfo[f]['P'] = headerinfo[f]['POFFSET'] - headerinfo[last_acq]['POFFSET']\n headerinfo[f]['Q'] = headerinfo[f]['QOFFSET'] - headerinfo[last_acq]['QOFFSET']\n logger.debug('Updated Info: %s', headerinfo)\n\n def mergedict(a, b):\n a.update(b)\n return a\n\n logger.info('Writing %s...', output) # Write the info for the requested Obs-ID into a csv file:\n with open(output, mode='w') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=['FITSFILE'] + headerinfo[f].keys())\n writer.writeheader()\n for k, d in sorted(headerinfo.items()):\n writer.writerow(mergedict({'FITSFILE': k}, d))\n\n return", "def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response", "def test_write_stock_csv(self):\n obj1 = collections.OrderedDict(\n [('a', 'A1'), ('b', 'B1'), ('c', 'C1')])\n obj2 = collections.OrderedDict(\n [('a', 'A2'), ('b', 'B2'), ('c', 'C2')])\n objects1 = [obj1, obj2]\n write_stock_csv('test.csv', objects1)\n objects2 = read_stock_csv('test.csv')\n self.assertEqual(objects1, objects2)", "def process_song_data(spark, input_data, output_data):\n \n print(\"Read song data\")\n df_song = spark.read.json(input_data+\"song_data/*/*/*/*.json\", schema=build_song_schema())\n \n # extract columns to create songs table\n songs_table = df_song[['song_id', 'title', 'artist_id', 'year', 'duration']].drop_duplicates()\n\n \n 
print(\"Write...\")\n # write songs table to parquet files partitioned by year and artist\n songs_table.write.save(path=output_data+'song_table',\n format='parquet',\n partitionBy=['year', 'artist_id'],\n mode='overwrite' )\n\n # extract columns to create artists table\n artists_table = df_song[['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']].drop_duplicates()\n\n print(\"Write...\")\n # write artists table to parquet files\n artists_table.write.save(path=output_data+'artists_table',\n format='parquet',\n mode='overwrite' )", "def write_csv(fd, data):\n # df = pd.DataFrame.from_dict(data)\n df = pd.io.json.json_normalize(data)\n print(df.to_csv(index=False), file=fd)", "def lines_to_json():\n from os import walk\n lines = {}\n\n filenames = list(walk('lines'))[0][2]\n for file in filenames:\n line_name = file[:-4]\n dict = {\n \"name\": line_name,\n \"rulers\": [],\n \"stations\": [],\n }\n fp = open('lines/' + file, 'r', encoding='utf-8', errors='ignore')\n for i, s in enumerate(fp):\n s = s.strip()\n if i <= 2:\n continue\n if not s:\n continue\n\n try:\n st = {\n \"zhanming\": s.split(',')[0],\n \"licheng\": int(s.split(',')[1]),\n \"dengji\": int(s.split(',')[2])\n }\n except IndexError:\n print(s, file)\n dict[\"stations\"].append(st)\n lines[line_name] = dict\n fp.close()\n\n out = open('source/lines.json', 'w', encoding='utf-8')\n json.dump(lines, out, ensure_ascii=False)\n out.close()", "def ingest_data(args):\n fetchopts = {\n \"fixtures\": FIXTURES,\n \"startyear\": args.start_year or fetch.STARTYEAR,\n \"endyear\": args.end_year or fetch.ENDYEAR\n }\n\n folder, num_series = fetch.fetch_all(**fetchopts)\n\n fcsv, num_rows = wrangle.wrangle_csv()\n fjson, _ = wrangle.wrangle_json()\n\n return (\n \"Ingested %i rows in %i time series to %s\\n\"\n \"Wrote JSON data to %s\\n\"\n \"Wrote CSV data to %s\"\n ) % (num_rows, num_series, folder, fcsv, fjson)", "def _getCSVForOverall(self, statistic):\n\n rows = []\n\n final_stat = simplejson.loads(statistic.final_json)\n for name, result in final_stat.iteritems():\n rows.append([name, result])\n\n return rows", "def parse_to_csv(data,namee):\n pth = BASE_DIR + '/reports/' + csv_name\n if not os.path.isfile(namee):\n csv_file = open(namee, 'wb')\n csv_writer = csv.writer(csv_file)\n top_row = [\n 'IP', 'Host', 'os', 'Proto', 'Port',\n 'Service','Service_version', 'Product', 'Service FP',\n 'NSE Script ID', 'NSE Script Output', 'Notes'\n ]\n csv_writer.writerow(top_row)\n print('\\n[+] The file {} does not exist. New file created!\\n'.format(\n csv_name))\n # else:\n # # try:\n # csv_file = open(csv_name, 'w')\n\n # csv_writer = csv.writer(csv_file)\n # print('\\n[+] {} exists. Appending to file!\\n'.format(csv_name))\n\n \n for item in data:\n csv_writer.writerow(item)\n csv_file.close()" ]
[ "0.60670656", "0.5917598", "0.5867013", "0.577265", "0.5737042", "0.5713435", "0.56811106", "0.56413877", "0.56257904", "0.5622406", "0.5606425", "0.5585183", "0.55736536", "0.55720776", "0.5546977", "0.554406", "0.55393744", "0.5519248", "0.5511894", "0.5495278", "0.5494552", "0.5441304", "0.5411523", "0.53953856", "0.53934216", "0.5385327", "0.53577816", "0.53548515", "0.53504646", "0.5348617", "0.5332478", "0.5330306", "0.5328321", "0.53274584", "0.5317514", "0.531603", "0.53098255", "0.53087944", "0.5303019", "0.52778417", "0.52521724", "0.5242575", "0.52323467", "0.5230243", "0.5222333", "0.52065665", "0.52039605", "0.5195443", "0.51921403", "0.51876247", "0.51807016", "0.5175332", "0.5174174", "0.517388", "0.51642746", "0.51617897", "0.5159042", "0.51551247", "0.51545894", "0.51532185", "0.5148603", "0.5144348", "0.51423883", "0.5141476", "0.5135516", "0.5134827", "0.5116618", "0.5105477", "0.5103113", "0.5100404", "0.5097104", "0.5094325", "0.5093442", "0.50911087", "0.5089456", "0.50799954", "0.5074512", "0.50742465", "0.5067412", "0.50664914", "0.50661474", "0.5065914", "0.5059311", "0.5057876", "0.5057096", "0.50427914", "0.50405884", "0.5035413", "0.5031642", "0.5028541", "0.50281054", "0.5026877", "0.5025005", "0.5024941", "0.50210947", "0.50169736", "0.50141305", "0.50093454", "0.5003451", "0.4999751" ]
0.7022942
0
Choose the correct instrument to use for observations for a given date range. Inputs must be date objects from the datetime module. Used if there is no information about which instrument was primary.
def choose_inst(given_start_date,given_end_date): #INPUTS MUST BE DATE OBJECTS inst_start_dates=[] inst_end_dates=[] good_instruments = [] good_end_dates = [] bad_inst = [] #extracting dates where instruments are active from csv file inst_dates = pd.read_csv(ref_path / 'instrument_dates.csv') for s in inst_dates['start']: inst_start_dates.append(datetime.strptime(str(s),'%Y-%m').date()) for e in inst_dates['end']: if str(e) == 'nan': inst_end_dates.append(datetime.today().date()) else: inst_end_dates.append(datetime.strptime(str(e),'%Y-%m').date()) #checking which instruments are active during given time period and #choosing the correct ones print('checking which instruments are active for given dates') for i in range(len(inst_start_dates)): if (inst_start_dates[i] < given_start_date) and (given_end_date < inst_end_dates[i]): print('%s works' %inst_dates['Instrument'][i]) good_instruments.append(inst_dates['Instrument'][i]) good_end_dates.append(inst_end_dates[i]) else: print('outside of %s range' %inst_dates['Instrument'][i]) #checking if active instruments actually have data for that date for inst in good_instruments: inst_str = inst.replace('-','').lower() year = str(given_start_date).split('-')[0] month = str(given_start_date).split('-')[1] url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' + month + '/' + inst_str) try: request.urlopen(url) print('%s data available' %inst) except: print('%s data NOT available' %inst) bad_inst.append(inst) #not choosing instrument if it doesn't have data for binst in bad_inst: good_instruments.remove(binst) #if more than one instrument is available, choose which one to use if len(good_instruments) > 1: print('Please choose which instrument you would like to use.') for j in range(len(good_instruments)): print('Type ' + str(j) + ' for ' + str(good_instruments[j])) inst_choice = input('Answer:' ) instrument = good_instruments[int(inst_choice)] end_date = good_end_dates[int(inst_choice)] print('we are using %s as our instrument for observations' %instrument) else: instrument = good_instruments[0] end_date = good_end_dates[0] print('we are using %s as our instrument for observations' %instrument) return([instrument,end_date])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_prime_inst(given_start_date,given_end_date):\r\n\r\n #extracting primary dates where instruments are active from csv file\r\n inst_prime_dates = pd.read_csv(ref_path / 'GOES_primary_assignments.csv', header=3)\r\n\r\n #figuring out which instrument is primary for given start date\r\n for d in range(len(inst_prime_dates['Start Date'])):\r\n change_date = parse(inst_prime_dates['Start Date'][d])\r\n if given_start_date >= change_date.date():\r\n prime_inst = inst_prime_dates['EPEAD Primary'][d]\r\n backup_inst = inst_prime_dates['EPEAD Secondary'][d]\r\n end_date = parse(inst_prime_dates['Start Date'][d+1]).date()\r\n\r\n #if no prime instrument available, have to choose which instrument\r\n #to use based on which instruments have data for this date\r\n if str(prime_inst) == 'nan':\r\n if str(backup_inst) == 'nan':\r\n print('no information about primary instrument available.'\r\n 'Choosing instrument based on active date ranges')\r\n alternate_output = choose_inst(given_start_date,given_end_date)\r\n\r\n return(alternate_output)\r\n else:\r\n prime_inst = backup_inst\r\n\r\n break\r\n\r\n prime_inst = str(prime_inst).split('.')[0]\r\n\r\n #reformatting instrument name\r\n if len(prime_inst) == 2:\r\n inst_str = str(prime_inst)\r\n elif len(prime_inst) == 1:\r\n inst_str = '0' + str(prime_inst)\r\n\r\n print('GOES-%s is the primary instrument for given start time' %inst_str)\r\n\r\n #checking to make sure this primary instrument actually has data\r\n year = str(given_start_date).split('-')[0]\r\n month = str(given_start_date).split('-')[1]\r\n url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' +\r\n month + '/goes' + inst_str)\r\n\r\n try:\r\n request.urlopen(url)\r\n print('GOES-%s has data available' %inst_str)\r\n instrument = 'GOES-' + inst_str\r\n print('we are using %s as our instrument for observations' %instrument)\r\n\r\n except request.HTTPError:\r\n #if primary instrument doesn't have data for this date, using backup instrument\r\n print('GOES-%s does NOT have data available' %inst_str)\r\n\r\n #reformatting backup instrument\r\n if len(str(backup_inst)) == 2:\r\n inst_str = str(backup_inst)\r\n elif len(str(backup_inst)) ==1:\r\n inst_str = '0' + str(backup_inst)\r\n\r\n print('checking for data from backup instrument GOES-%s' %inst_str)\r\n\r\n url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/'\r\n + month + '/goes' + inst_str)\r\n\r\n #checking to see if backup instrument has data for this date, if not have\r\n #to manually choose which instrument to use based off which instruments\r\n #have data available\r\n try:\r\n request.urlopen(url)\r\n print('backup instrument data found - using backup instrument')\r\n instrument = 'GOES-' + inst_str\r\n print('we are using %s as our instrument for observations'\r\n %instrument)\r\n\r\n except request.HTTPError:\r\n print('no knowledge of backup or primary instrument - choosing '\r\n 'instrument based on available data')\r\n alternate_output = choose_inst(given_start_date,given_end_date)\r\n\r\n return(alternate_output)\r\n\r\n return([instrument,end_date])", "def test_date_interval(self, init_date, end_date):\n self.calc_earning(self.security[(self.security['Date'] > init_date) &\n (self.security['Date'] < end_date)])", "def database_extraction(mod_start_time,mod_end_time,instrument_chosen,subevent_bool,\r\n detect_previous_event = False,thresholds='100,1',\r\n one_thresh = False):\r\n obs_file_created = False\r\n\r\n #extending time window\r\n window_end_time = (mod_end_time + 
timedelta(days=2))\r\n window_start_time = (mod_start_time - timedelta(days=2))\r\n \r\n #making a list of all dates within window\r\n day_list=[]\r\n for d in range(10):\r\n day_list.append((window_start_time + timedelta(days=d)).date())\r\n print('day list = %s' %day_list)\r\n \r\n print('determining if an instrument has been chosen')\r\n\r\n if instrument_chosen:\r\n #if an instrument has been chosen, checking to make sure it still works for this date\r\n if inst_end < window_end_time:\r\n instrument_chosen = False\r\n else:\r\n #if insturment hasn't been chosen, figuring out what it should be for given date\r\n try:\r\n #if instrument is specified in cfg using that\r\n instrument = cfg.instrument\r\n inst_end = datetime.today()\r\n print('using %s as our instrument for observations' %instrument)\r\n instrument_chosen = True\r\n\r\n except:\r\n #choosing instrument using function if not given in cfg\r\n instrument_stuff = choose_prime_inst(window_start_time.date(),\r\n window_end_time.date())\r\n instrument = instrument_stuff[0]\r\n #figuring out how long we can use this instrument\r\n inst_end = instrument_stuff[1]\r\n instrument_chosen = True\r\n \r\n #running katie's code to extract data using chosen instrument and dates\r\n print('extracting data from GOES website')\r\n \r\n #running for only one threshold if one_thresh is true, otherwise running for default\r\n #thresholds as well as any additional threshold given\r\n if one_thresh:\r\n one_sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds) \r\n print('ran for threshold %s' %thresholds)\r\n else:\r\n if subevent_bool:\r\n thresholds = '10,1'\r\n #if event is a subevent, changing the threshold in katie's code to\r\n #10 MeV > 1pfu so that it will be recorded\r\n print('********************SUBEVENT**************************')\r\n sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n print('ran for subevent')\r\n else:\r\n #if an event, running with usual thresholds\r\n print('********************EVENT*****************************')\r\n sep.run_all(str(window_start_time), str(window_end_time),str(instrument), \r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n \r\n #reloading function so it doesn't keep old data \r\n reload(sep)\r\n \r\n #reformatting csv created from katie's code to json\r\n print('extracted - reformatting') \r\n for day in day_list: \r\n if not obs_file_created:\r\n #checking each day within the window to find the csv file if it hasn't\r\n #already been found\r\n print('thresholds: %s' %thresholds)\r\n \r\n if one_thresh:\r\n #name includes threshold if only ran for one threshold\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_gt' +\r\n str(thresholds).split(',')[0] + '_' + str(thresholds).split(',')[1] + 'pfu_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n else:\r\n #otherwise only includes date ran for\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n \r\n print('new_os_name %s' %new_obs_name) \r\n \r\n #checking if that file exists\r\n if os.path.exists(katies_path / new_obs_name):\r\n #if a file with this date exists, creating the corresponding json file\r\n \r\n #json name\r\n if one_thresh:\r\n obs_name = (str(instrument) + '_' + str(day) + 'only_' + str(thresholds).split(',')[0] + 'MeV_event.json')\r\n 
else:\r\n obs_name = (str(instrument) + '_' +\r\n str(day) + '.json')\r\n #creating json file\r\n obs_csv2json((katies_path / new_obs_name), obs_name,\r\n (ref_path/'example_sepscoreboard_json_file_v20190228.json'),\r\n instrument)\r\n \r\n print('obs file created')\r\n #file is created - will not run for anymore dates within window\r\n obs_file_created = True\r\n \r\n return(obs_name)\r\n else:\r\n print('no csv file found with this date, checking next one')", "def __init__(self, code, start_date=\"1900-01-01\", end_date=\"2020-01-01\"):\n base = Base()\n self.datas = base.getData(\n code=code, start_date=start_date, end_date=end_date)\n self._index = 0\n self.period = 14", "def rate_between(self, from_date, to_date):\n print(\"override the above\")", "def date_match(self,dateRange, input_frame):\n # find match with a exact date, output one element\n if dateRange['Start Date'] == dateRange['End Date']:\n # convert dtype to datetime64, match the data in dataframe\n exact_date = np.datetime64(dateRange['Start Date'])\n # key = column name, value = keyword\n target_time = input_frame[input_frame['Start Date'] == exact_date]\n # if search a range\n else:\n # only a start date or an end date entered\n if dateRange['Start Date'] == '' or dateRange['End Date'] == '':\n # only a start date input, then return the data from entered date to most recent\n if dateRange['End Date'] == '':\n start = np.datetime64(dateRange['Start Date'])\n target_time = input_frame[input_frame['Start Date'] >= start]\n # only an ende date input, then return the data before the entered date\n else:\n end = np.datetime64(dateRange['End Date'])\n target_time = input_frame[input_frame['Start Date'] <= end]\n # convert datatype to datetime64, match the data in dataframe\n else:\n start = np.datetime64(dateRange['Start Date'])\n end = np.datetime64(dateRange['End Date'])\n # mask target_time\n target_time = input_frame[(input_frame['Start Date'] <= end) & (input_frame['Start Date'] >= start)]\n # return filtered dataframe\n return target_time", "def returnDatesAndRegions(start=None, end=None, theRegs=None, isWeekly=False, isViral=False):\r\n\t# Default values\r\n\tregions = [\"global\", \"ad\", \"ar\", \"at\", \"au\", \"be\", \"bg\", \"bo\", \"br\", \"ca\", \"ch\", \"cl\", \"co\", \"cr\", \"cy\", \"cz\", \"de\", \"dk\", \"do\", \"ec\", \"ee\", \"es\", \"fi\", \"fr\", \"gb\", \"gr\", \"gt\", \"hk\", \"hn\", \"hu\", \"id\", \"ie\", \"il\", \"is\", \"it\", \"jp\", \"lt\", \"lu\", \"lv\", \"mc\", \"mt\", \"mx\",\"my\", \"ni\", \"nl\", \"no\", \"nz\", \"pa\", \"pe\", \"ph\", \"pl\", \"pt\", \"py\", \"ro\", \"se\", \"sg\", \"sk\", \"sv\", \"th\", \"tr\", \"tw\", \"us\", \"uy\", \"vn\"]\r\n\tviralWeeklyStart = \"2017-01-05\"\r\n\ttopWeeklyStart = \"2016-12-22\"\r\n\tallDailyStart = \"2017-01-01\"\r\n\r\n\t#Required since dates taken are very specific\r\n\tdefaultList = defaultListOfDates(isWeekly, isViral)\r\n\t#--------------------------------------------\r\n\r\n\t# Helper for Exception handling\r\n\tif(isWeekly and isViral):\r\n\t\tfunc = \"viral50Weekly\"\r\n\telif(isWeekly and not isViral):\r\n\t\tfunc = \"top200Weekly\"\r\n\telif(not isWeekly and isViral):\r\n\t\tfunc = \"viral50Daily\"\r\n\telif(not isWeekly and not isViral):\r\n\t\tfunc = \"top200Daily\"\r\n\t# \r\n\r\n\t# Start dates\r\n\tif(start is None): #From the beginning\r\n\t\tif(isWeekly):\r\n\t\t\tif(isViral):\r\n\t\t\t\tstart = datetime.datetime.strptime(viralWeeklyStart, \"%Y-%m-%d\")\r\n\t\t\telse:\r\n\t\t\t\tstart = datetime.datetime.strptime(topWeeklyStart, 
\"%Y-%m-%d\") \r\n\t\telse:\r\n\t\t\tstart = datetime.datetime.strptime(allDailyStart, \"%Y-%m-%d\")\r\n\telse:\r\n\t\tif(start in defaultList):\r\n\t\t\tstart = datetime.datetime.strptime(start, \"%Y-%m-%d\")\r\n\t\telse:\r\n\t\t\torderedList = sorted(defaultList, key=lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\") - datetime.datetime.strptime(start, \"%Y-%m-%d\"))\r\n\t\t\tclosest = [d for d in orderedList if d >= start]\r\n\t\t\tsuggest = closest[0:5]\r\n\t\t\tlogger.info(f\"The start date {start} provided for {func} is invalid. Wanna give one these a try? {suggest}\")\r\n\t\t\tchoice = input(\"Enter (1) to use the first suggestion, or (2) to quit and set yourself: \")\r\n\t\t\tif(int(choice) == 1):\r\n\t\t\t\tstart = datetime.datetime.strptime(suggest[0], \"%Y-%m-%d\")\r\n\t\t\telif(int(choice) == 2):\r\n\t\t\t\tsys.exit()\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(\"Invalid Choice.\")\r\n\r\n\r\n\t# End dates\r\n\tif(end is None): #Up to now\r\n\t\tend = datetime.datetime.now()\r\n\telse:\r\n\t\tend = datetime.datetime.strptime(end, \"%Y-%m-%d\")\r\n\t\t\r\n\r\n\t# Region\r\n\tregion = []\r\n\tif(theRegs is None):\r\n\t\tregion = regions\r\n\telse:\r\n\t\tif(type(theRegs) is not list):\r\n\t\t\tregs = []\r\n\t\t\tregs.append(theRegs)\r\n\t\t\ttheRegs = regs\r\n\t\t\t\r\n\t\tfor aReg in theRegs:\r\n\t\t\tif(aReg in regions):\r\n\t\t\t\tregion.append(aReg)\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(f\"Data for the region --> {aReg} <-- requested for {func} does not exist. Please try another region\")\r\n\r\n\t#Generate list of dates\r\n\tdates = [] \r\n\tif(isWeekly): \r\n\t\tif(isViral):\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\t\telse:\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\r\n\telse:\r\n\t\tgen = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days+1)]\r\n\t\tfor date in gen:\r\n\t\t\tif(date<=end):\r\n\t\t\t\tdates.append(date.strftime(\"%Y-%m-%d\"))\r\n\r\n\tvar = {\"dates\": dates, \"region\": region}\r\n\treturn var", "def valuation(self, from_date=None):\n import pandas_datareader.data as pdr\n import datetime\n to_date = datetime.date.today()\n if not from_date: from_date = to_date - datetime.timedelta(days=1)\n px = pdr.DataReader(self.ticker, 'yahoo', from_date, to_date)\n\n f = self.Fundamentals\n\n print(\"OF COURSE \", 7, f, px)\n # for i in set(f.perod_end_date):", "def get_values_between_dates(self, date_start=None, date_end=None, dt_max=0.0, start_strict=False, end_strict=True):\n \n if start_strict:\n start_diff_operator = '>'\n else:\n start_diff_operator = '>='\n if end_strict:\n end_diff_operator = '<'\n else:\n end_diff_operator = '<='\n \n if dt_max < 0.:\n raise Exception('dt_max must be > 0')\n \n if (date_start is not None) and (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) AND datetime(date_data) %s datetime(?) 
ORDER BY datetime(date_data)\"%(start_diff_operator, end_diff_operator), \\\n params=[self.date2str(date_start-timedelta(dt_max)), self.date2str(date_end+timedelta(dt_max))])\n elif (date_start is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%start_diff_operator, \\\n params=[self.date2str(date_start-timedelta(dt_max))])\n elif (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%end_diff_operator, \\\n params=[self.date2str(date_end+timedelta(dt_max))])\n else:\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO ORDER BY datetime(date_data)\")", "def to_stock_data_range(self, start_date=None, end_date=None):\n # standardize dates\n if end_date is None:\n end_date = self.dates[-2]\n if type(end_date) is pd.tslib.Timestamp:\n end_date = end_date.strftime(\"%Y-%m-%d\")\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n try:\n end_date = self.dates[list(self.dates).index(end_date) + 1]\n except:\n end_date = \"Last\"\n\n if start_date is None:\n start_date = self.dates[0]\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n if end_date is \"Last\":\n dates = list(self.dates)[list(self.dates).index(start_date):]\n else:\n dates = list(self.dates)[list(self.dates).index(start_date):list(self.dates).index(end_date)]\n\n # find functions to set\n dataframes = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is pd.DataFrame]\n dictionaries = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is dict]\n constant_values = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and getattr(self, i) is not None and i not in dataframes and i not in dictionaries]\n\n # transfer new data\n new_stock_data = StockData()\n\n for i in constant_values:\n setattr(new_stock_data, i, getattr(self, i))\n\n for i in dataframes:\n if end_date is not \"Last\":\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:end_date])\n else:\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:])\n\n for i in dictionaries:\n new_dict = {}\n for d in dates:\n new_dict[d] = getattr(self, i)[d]\n setattr(new_stock_data, i, new_dict)\n\n new_stock_data.dates = dates\n new_stock_data.str_dates = [str(d)[:USEFUL_TIMESTAMP_CHARS] for d in dates]\n\n return new_stock_data", "def __init__(self, start_date_str: str, end_date_str: str):\r\n start_date, end_date = create_date_from_string(start_date_str, end_date_str)\r\n if is_date_valid(start_date, end_date):\r\n self.days_range_array = create_days_range(start_date, end_date)\r\n self.months_range_array = create_months_range(self.days_range_array)\r\n else:\r\n raise Exception", "def get_interest_variable(\n in_dataset, sensor_var, date_col, hr_col, numeric_var, target_sensor=\"A620\"\n):\n dataset_pproc = in_dataset.loc[\n in_dataset[sensor_var] == target_sensor, [date_col, hr_col] + [numeric_var]\n ]\n hrs_str = dataset_pproc[hr_col].to_string()\n dates_str = dataset_pproc[date_col]\n\n dataset_pproc[date_col] = pd.to_datetime(dataset_pproc[date_col])\n 
dataset_pproc.set_index([date_col, hr_col], inplace=True)\n dataset_pproc.fillna(method=\"ffill\", inplace=True)\n dataset_pproc.interpolate(method=\"linear\", axis=0)\n\n return dataset_pproc", "def __getQuerysetGivenInterval(model, start_date, end_date):\n cur_model = {\n 'donor': Donor,\n 'donation': Donation,\n 'item': Item\n }.get(model, Donor.objects.none())\n\n # might need following lines when changing back to created_at:\n # date_format = \"%Y-%m-%d\"\n # if start_date is not None:\n # timezone_unaware_start_date = datetime.strptime(start_date, date_format)\n # timezone_aware_start_date = pytz.utc.localize(timezone_unaware_start_date)\n #\n # if end_date is not None:\n # timezone_unaware_end_date = datetime.strptime(end_date, date_format)\n # timezone_aware_end_date = pytz.utc.localize(timezone_unaware_end_date).date()\n\n if start_date is not None and end_date is not None:\n return cur_model.objects.filter(documented_at__range=(start_date, end_date))\n elif start_date is not None and end_date is None:\n return cur_model.objects.filter(documented_at__gte=start_date)\n elif start_date is None and end_date is not None:\n return cur_model.objects.filter(documented_at__lte=end_date)\n else:\n return cur_model.objects.all()", "def __init__(__self__, *,\n end_date: str,\n start_date: str,\n time: str):\n pulumi.set(__self__, \"end_date\", end_date)\n pulumi.set(__self__, \"start_date\", start_date)\n pulumi.set(__self__, \"time\", time)", "def get_scns_for_date(self, date_of_interest, valid=True, ard_prod=True, platform=None):\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n if platform is None:\n if valid and ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False, EDDSentinel1ASF.ARDProduct == True).all()\n elif valid:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False).all()\n elif ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.ARDProduct == True).all()\n else:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest).all()\n else:\n if valid and ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False, EDDSentinel1ASF.ARDProduct == True,\n EDDSentinel1ASF.Platform == platform).all()\n elif valid:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False, EDDSentinel1ASF.Platform == platform).all()\n elif ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.ARDProduct == True, EDDSentinel1ASF.Platform == platform).all()\n else:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Platform == platform).all()\n return scns", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n 
orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += '-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)", "def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)", "def filter_data_by_date(df, ticker, start_date, end_date):\n if start_date is None:\n start_date = MIN_DATE\n\n if end_date is None:\n end_date = MAX_DATE\n\n filtered = df[\n (df[\"ticker\"] == ticker) & (df[\"date\"] >= start_date) & (df[\"date\"] <= end_date)\n ]\n return filtered", "def visitRange(self, date):\n raise NotImplementedError()", "def resampleDataSet(dailyData, resampleString, resampleMethod, customFunction = None):\n\n # Make sure the index is sorted\n dailyData.sort_index(level='Datetime', inplace=True)\n\n # Get today's date\n today = datetime.now()\n\n # Create a new empty series\n resampleData = pd.Series([], index = pd.DatetimeIndex([]))\n\n # Get information about the daily data\n firstDate = dailyData.index[0][0]\n\n # Parse the resample string\n resampleList = resampleString.split('/') # Converts 'R/1978-10-01/P1M/F1Y' into ['R', '1978-10-01', 'P1M', 'F1Y', 'S1Y']\n\n # Validate the list\n if resampleList[0] != 'R' or len(resampleList[1]) != 10 or resampleList[2][0] != 'P' or resampleList[3][0] != 'F': #or len(resampleList) != 4\n return resampleData, 1, 'Invalid Resample String. Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y'\n \n # Validate the resample method\n if resampleMethod not in ['accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median']:\n return resampleData, 1, \"Invalid resampling method. 
Provide one of 'accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median'\"\n\n # Parse into values\n startDate = datetime.strptime(resampleList[1], '%Y-%m-%d') # >>> datetime.date(1978, 10, 1)\n period = isodate.parse_duration(resampleList[2]) # >>> isodate.duration.Duration(0, 0, 0, years=0, months=1)\n # Change the period to 1 day if the resample method is 'first'\n if resampleMethod == 'first':\n period = isodate.parse_duration(\"P1D\")\n frequency = isodate.parse_duration(resampleList[3].replace('F', 'P')) # >>> isodate.duration.Duration(0, 0, 0, years=1, months=1)\n\n # Create all the periods\n periods = []\n tracker = startDate\n while tracker <= today: # >>> periods = [(datetime.datetime(1978-10-01), datetime.datetime(1978-11-01))]\n periods.append((tracker, tracker+period))\n tracker += frequency\n\n # Parse the function\n func = lambda x: np.nan if x.isnull().all() else (np.nanmean(x) if resampleMethod == 'average' else (\n np.nansum(x) if resampleMethod == 'accumulation' else (\n 86400*(1/43560000)*np.nansum(x) if resampleMethod == 'accumulation_cfs_kaf' else (\n x.iloc[0] if resampleMethod == 'first' else (\n x.iloc[-1] if resampleMethod == 'last' else (\n np.nanmedian(x) if resampleMethod == 'median' else (\n np.nanmax(x) if resampleMethod == 'max' else (\n np.nanmin(x) if resampleMethod == 'min' else eval(customFunction)))))))))\n\n # Resample the data\n for idx in pd.IntervalIndex.from_tuples(periods):\n data = dailyData.loc[idx.left : idx.right]\n if resampleMethod != 'first' and resampleMethod != 'last':\n data.isMostlyThere = len(data) > int(0.95*(idx.right-idx.left).days) # Check to make sure 95% of data is there!\n else:\n data.isMostlyThere = True\n resampleData.loc[idx.left] = ( func(data) if (idx.right >= firstDate and today >= idx.right and (data.isMostlyThere)) else np.nan )\n\n if len(resampleList) == 5:\n shiftStrings = list(resampleList[4])\n if shiftStrings[1].isdigit():\n resampleData.index = resampleData.index + pd.offsets.DateOffset(years=int(shiftStrings[1]))\n else:\n return resampleData, 1, \"Invalid Resample String. 
Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y\"\n\n\n # Name the dataframe\n resampleData.name = dailyData.name + '_' + resampleList[1] + '_' + resampleList[2] + '_' + resampleList[3] + '_' + resampleMethod + '_' + str(customFunction)\n\n return resampleData", "def date_range(start, end):\n \"\"\"between the start and end date inclusive.\"\"\"\n # Create a link to the session\n session = Session(engine)\n \n # Get the start and end date of the data\n final_date = session.query(Measurements.date).order_by(Measurements.date.desc()).first()[0]\n first_date = session.query(Measurements.date).order_by(Measurements.date.asc()).first()[0]\n \n # Make sure dates are in range of available data\n if (start > final_date) or (start < first_date) or (end > final_date) or (end < first_date) or (start>end):\n return f\"{start} - {end} is not a proper date range.</br>Try dates between {first_date} - {final_date}\"\n\n # Query the min, avg, and max temps for the given timeframe\n results = []\n while start <= end:\n min_temp = session.query(func.min(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n avg_temp = session.query(func.avg(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n max_temp = session.query(func.max(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n \n # Store the information retrieved\n results.append([start, min_temp, avg_temp, max_temp])\n \n # Update the date to check the next record\n date1 = start.split(\"-\")\n date1 = dt.date(int(date1[0]), int(date1[1]), int(date1[2])) + dt.timedelta(days=1)\n start = date1.strftime(\"%Y-%m-%d\")\n\n session.close()\n\n # Create a dictionary from the query results\n date_temps = []\n for date, min_temp, avg_temp, max_temp in results:\n date_temps_dict = {}\n date_temps_dict[\"date\"] = date\n date_temps_dict[\"min_temp\"] = min_temp\n date_temps_dict[\"avg_temp\"] = round(avg_temp, 2)\n date_temps_dict[\"max_temp\"] = max_temp\n date_temps.append(date_temps_dict)\n \n return jsonify(date_temps)", "def test_new_items_have_increasing_dates(self):\n input_ = [\n self.indicator_record(date=datetime.date(2004, 11, 1), value=0.69),\n self.indicator_record(date=datetime.date(2004, 12, 1), value=0.86),\n self.indicator_record(date=datetime.date(2005, 1, 1), value=0.58),\n ]\n records = self.expander._ipca_from_15_expander(input_)\n\n self.assertTrue(records[-1].date > input_[-1].date)", "def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date", "def test_time_series_intraday_date_integer(self, mock_request):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='integer')\n url = \"http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json\"\n path_file = self.get_file_from_url(\"mock_time_series\")\n with open(path_file) as f:\n mock_request.get(url, text=f.read())\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n assert type(data.index[0]) == int", "def _get_output_date_range_for(self, from_input_dt, to_input_dt):\n return from_input_dt, to_input_dt", "def date_range_filter(dr):\n assert IDateRange.providedBy(dr) or IDateRangeFactory.providedBy(dr)\n if IDateRangeFactory.providedBy(dr):\n dr = dr(datetime.now())\n factory = queryUtility(IFactory, dottedname(IQueryFilter))\n if factory is None:\n return 
ComponentLookupError('cannot find factory for query filter')\n return factory(value=(dr.start, dr.end), query_range=dr.query_range)", "def test_fill_data_with_one_date(self):\n # date = pd.to_datetime('2015-06-30')\n date = pd.to_datetime('2011-05-09')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n\n # df_date = self.full_iv.df_all.query('date == %r' % date)\n # df_date = df_date[['date', 'dte', 'mark', 'strike', 'impl_vol']]\n # print df_date.sort_values(['dte', 'strike']).to_string(line_width=1000)\n\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n\n self.assertTrue(len(df_iv))", "def test_rise_timeseries_with_expert_model_for_correct_max_and_min():\n hot_day_index = 6\n cold_day_index = 12\n temperature_timeseries = average_temperature_timeseries_with_1_cold_and_1_hot_day(cold_day_index, hot_day_index)\n\n summer_explanation, winter_explanation = dianna.explain_timeseries(run_expert_model,\n timeseries_data=temperature_timeseries,\n method='rise',\n labels=[0, 1],\n p_keep=0.1, n_masks=10000,\n mask_type=input_train_mean)\n\n assert np.argmax(summer_explanation) == hot_day_index\n assert np.argmin(summer_explanation) == cold_day_index\n assert np.argmax(winter_explanation) == cold_day_index\n assert np.argmin(winter_explanation) == hot_day_index", "def test_output_day(self):\n input_ = [\n self.indicator_record(date=datetime.date(2011, 1, 1), value=0.83),\n self.indicator_record(date=datetime.date(2011, 2, 1), value=0.80),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n\n self.assertEqual(output[-1].date.day, 1)", "def _get_input_date_range_for(self, from_output_dt, to_output_dt):\n # If comb is adaptive, the required input date range needs to account for the time window\n if self.is_adaptive:\n if from_output_dt is None:\n return from_output_dt, to_output_dt\n return from_output_dt-timedelta(days=self.time_window), to_output_dt\n # Otherwise, the comb is already trained and does not need to fill up the time window first\n return from_output_dt, to_output_dt", "def __init__(\n self,\n name: str,\n source: str,\n start_date: np.ndarray,\n end_date: np.ndarray,\n ):\n super().__init__(name, source, start_date)\n self.end_date = end_date", "def get_series(self, series_code: str, date: datetime):\n\n raise NotImplementedError", "def from_date(cls, d):\n raise NotImplementedError", "def select_data(data=pd.DataFrame(), date_initial=\"2005-01-01\", date_final=\"2019-12-31\"):\n data = data[data.index >= date_initial]\n data = data[data.index <= date_final]\n return data", "def getExerciseDate(self, ins):\n if ins.insid in self.insDates:\n return self.insDates[ins.insid]\n\n exotic = ins.exotics()[0] if ins.exotics() else None\n isKnockIn = 0\n isOneTouch = 0\n isKnockOut = 0\n isNoTouch = 0\n if exotic:\n isKnockIn, isKnockOut, isOneTouch, isNoTouch = \\\n getOptionType(exotic)\n # get the exercise date\n if (exotic and isBarrier(ins) and (isKnockIn or isOneTouch or\n isKnockOut or isNoTouch) and\n exotic.barrier_cross_date and\n exotic.barrier_crossed_status == 'Confirmed'):\n date = exotic.barrier_cross_date\n elif ins.exp_day and ins.exp_day <= ael.date_today():\n date = ins.exp_day\n else:\n date = ael.date_today()\n t = ael.date(time.strftime('%Y %m %d', time.localtime(ins.exp_time)))\n if t <= date and t > ael.date('1970-01-02'):\n date = t\n self.insDates[ins.insid] = date\n return date", "def slice(self, start_date, end_date = None):\n\n if end_date is None:\n end_date = 
self.series.index[-1]\n self.series = self.series.loc[start_date:end_date]", "def visitInterpreted(self, date):\n raise NotImplementedError()", "def working_data(df, date_of_interest, lower_window, upper_window):\n\n # Actual dates we are interested in\n lower_date = date_of_interest - timedelta(days=lower_window)\n upper_date = date_of_interest + timedelta(days=upper_window)\n\n # Specs want us to call more than that\n lower_date_extreme = date_of_interest - timedelta(days=(2 * lower_window + 1))\n upper_date_extreme = date_of_interest + timedelta(days=(2 * upper_window))\n\n # Tighten to the range we want (and show non-trading days too)\n df = df.reindex(pd.date_range(lower_date_extreme, upper_date_extreme, freq='D'))\n df = df.rename(columns={'volume': 'Volume'})\n df['Volume'] = df['Volume'].fillna(0)\n df['close'] = df['close'].fillna(method='ffill')\n\n # Tag with relative dates\n df = df.apply(tag_relative_date, axis=1, args=(date_of_interest, lower_date, upper_date))\n\n # Calculate the data we want\n df['Return'] = df['close'].diff()\n df['Return_pct'] = df['close'].pct_change()\n df['Daily_Spread'] = df['high'] - df['low']\n df['Daily_Spread'] = df['Daily_Spread'].fillna(0)\n\n return df", "def classify_instrument(self):\n print(self._identity)\n if self._identity in multimeters:\n print('Instrument in multimeter list')\n return Multimeter.from_serial_instrument(self)\n elif self._identity in function_generators:\n print('Instrument in function generator list')\n return FunctionGenerator.from_serial_instrument(self)\n elif self._identity in power_supplies:\n print('Instrument in power supply list')\n return PowerSupply.from_serial_instrument(self)\n else:\n return None", "def test_time_series_intraday_date_indexing(self, mock_request):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='date')\n url = \"http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json\"\n path_file = self.get_file_from_url(\"mock_time_series\")\n with open(path_file) as f:\n mock_request.get(url, text=f.read())\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n if sys.version_info[0] == 3:\n assert isinstance(data.index[0], str)\n else:\n assert isinstance(data.index[0], basestring)", "def IRIS_ARC_IC(input, clients):\n \n if input[clients + '_ic_auto'] == 'Y':\n global events \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n address = eventpath\n elif input[clients + '_ic'] != 'N':\n address = input[clients + '_ic']\n \n events, address_events = quake_info(address, 'info')\n \n for i in range(0, len(events)):\n sta_ev = read_station_event(address_events[i])\n ls_saved_stas = []\n \n for j in range(0, len(sta_ev[0])):\n if clients == sta_ev[0][j][13]:\n station_id = sta_ev[0][j][0] + '.' + sta_ev[0][j][1] + '.' + \\\n sta_ev[0][j][2] + '.' 
+ sta_ev[0][j][3]\n ls_saved_stas.append(os.path.join(address_events[i], 'BH_RAW',\\\n station_id))\n \n print 'event: ' + str(i+1) + '/' + str(len(events)) + \\\n ' -- ' + clients\n print '------------------------------------'\n inst_correct(input, ls_saved_stas, address_events[i], clients) \n \n print \"**********************************\"\n print clients.upper() + ' Instrument Correction is DONE'\n print \"**********************************\"", "def billing_choose_dates(self):\n number_of_dates_to_be_generated_per_patient = (\n self.number_of_dates_to_be_generated_per_patient\n )\n dunning_cycle_length = self.dunning_cycle_length\n dates = self.dates\n first_date = random.choice(\n dates\n ) # randomly choose a start date from the list of possible start dates\n last_possible_date = first_date + datetime.timedelta(\n days=dunning_cycle_length\n ) # calculate the last date possible based on Dunnin Cycle\n time_between_dates = last_possible_date - first_date\n subsequent_events = random.sample(\n list(np.arange(0, time_between_dates.days)),\n number_of_dates_to_be_generated_per_patient,\n )\n subsequent_events.sort()\n dates = [\n first_date + datetime.timedelta(days=np.int(subsequent_event))\n for subsequent_event in subsequent_events\n ]\n event_list = pd.DataFrame(dates)\n return event_list", "def define_secdate(self):\r\n \r\n # Since 2017\r\n self.start_date = datetime.datetime(2017,1,1) + (datetime.datetime(2017,12,31) - datetime.datetime(2017,1,1))/2 \r\n self.end_date = datetime.datetime(2050,1,1)\r\n self.ktime = (self.end_date - self.start_date).days + 1\r\n self.date = np.zeros(self.ktime,dtype=datetime.datetime)\r\n self.t = np.zeros(self.ktime)\r\n self.dt = 1/365.25\r\n \r\n for k in range(0,self.ktime):\r\n \r\n self.date[k] = self.start_date + datetime.timedelta(days=self.t[k]*365.25)\r\n\r\n if k < self.ktime-1:\r\n \r\n self.t[k+1] = self.t[k] + self.dt\r\n \r\n # Since 1990\r\n self.start_date_hist = datetime.datetime(1990,1,1) + (datetime.datetime(1990,12,31) - datetime.datetime(1990,1,1))/2 \r\n self.ktime_1990_2050 = (self.end_date - self.start_date_hist).days + 1\r\n self.date_1990_2050 = np.zeros(self.ktime_1990_2050,dtype=datetime.datetime)\r\n self.t_1990_2050 = np.zeros(self.ktime_1990_2050)\r\n \r\n for k in range(0,self.ktime_1990_2050):\r\n \r\n self.date_1990_2050[k] = self.start_date_hist + datetime.timedelta(days=self.t_1990_2050[k]*365.25)\r\n \r\n if (self.date_1990_2050[k].year == self.start_date.year and self.date_1990_2050[k].month == self.start_date.month and self.date_1990_2050[k].day == self.start_date.day):\r\n \r\n self.ktime_proj_crossing = k\r\n \r\n \r\n if k < self.ktime-1:\r\n \r\n self.t_1990_2050[k+1] = self.t_1990_2050[k] + self.dt \r\n \r\n return", "def set_date_range(self, start_date, end_date):\n self._validate_date_range(start_date, end_date)\n self.start_date = pd.Timestamp(start_date)\n self.end_date = pd.Timestamp(end_date)", "def simulator_from_instrument(instrument):\r\n\r\n grid = grid_from_instrument(instrument=instrument)\r\n psf = psf_from_instrument(instrument=instrument)\r\n\r\n if instrument in \"vro\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=100.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"euclid\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2260.0, 
shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst_up\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"ao\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=1000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n else:\r\n raise ValueError(\"An invalid instrument was entered - \", instrument)", "def get_dummy_data(num_days, low, high, end_date='1970-01-01'):\n step = (high - low) / (num_days - 1)\n ref = datetime.strptime(end_date, '%Y-%m-%d').date()\n start_dt = ref - timedelta(days=(num_days - 1))\n end_dt = ref + timedelta(days=1)\n ts = np.arange(start_dt, end_dt, timedelta(days=1)).astype(date)\n df = pd.DataFrame(data={'price': np.arange(low, high + 1, step)}, index=pd.DatetimeIndex(ts))\n df.index.name = 'date'\n return df", "def _exclude_dates(self, X, y, exclude_dates):\n self.exclude_dates = exclude_dates\n if len(self.exclude_dates) != 0:\n for exclude_date_range in self.exclude_dates:\n t0,t1 = [datetimeify(dt) for dt in exclude_date_range]\n inds = (y.index<t0)|(y.index>=t1)\n X = X.loc[inds]\n y = y.loc[inds]\n return X,y", "def loop_observations ( self, start_date, end_date, step=1, fmt=\"%Y-%m-%d\" ):\n\n start_date = datetime.datetime.strptime( start_date, fmt )\n end_date = datetime.datetime.strptime( end_date, fmt )\n if start_date < self.date[0]:\n print \"No observations until %s, starting from there\" % self.date[0]\n start_date = self.date[0]\n\n if end_date > self.date[-1]:\n print \"No observations after %s, stopping there\" % self.date[-1]\n end_date = self.date[-1]\n\n delta = datetime.timedelta ( days=step )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n obs_dates = [ x.date() for x in self.date ]\n while this_date < end_date:\n if this_date in obs_dates:\n iloc = obs_dates.index ( this_date )\n have_obs = True\n the_data = self._data_pntr[iloc].ReadAsArray()\n try:\n the_mask = map ( *self.masks[iloc] )\n except:\n the_mask = self.get_mask ( iloc )\n the_emulator = self.emulator[ iloc ]\n the_sza = self.sza[ iloc ]\n the_saa = self.saa[ iloc ]\n the_vza = self.vza[ iloc ]\n the_vaa = self.vaa[ iloc ]\n the_fname = self._data_pntr[iloc].GetDescription()\n try:\n the_sensor = self.sensor[iloc]\n except:\n the_sensor = self.sensor\n try:\n the_spectrum = self.spectral[iloc]\n except:\n the_spectrum = self.spectral\n\n else:\n have_obs = False\n the_data = None\n the_mask = None\n the_emulator = None\n the_sza = None\n the_saa = None\n the_vza = None\n the_vaa = None\n the_fname = None\n the_spectrum = None\n the_sensor = None\n this_date += delta\n retval = namedtuple ( \"retval\", [\"have_obs\", \"sensor\", \"date\", \"image\", \"mask\", \"emulator\",\n 
\"sza\", \"saa\", \"vza\", \"vaa\", \"fname\", \"spectrum\"] )\n retvals = retval ( have_obs=have_obs, sensor=the_sensor, \n date=this_date - delta, image=the_data, mask=the_mask, emulator=the_emulator, sza=the_sza,\n saa=the_saa, vza=the_vza, vaa=the_vaa, fname=the_fname, spectrum=the_spectrum )\n yield retvals", "def open(restrictions=None):\n # All config values for implemented instruments should be called\n if restrictions is None:\n restrictions = {}\n\n com_ports = restrictions.get('com_ports', None)\n baud_rates = restrictions.get('baud_rates', None)\n classes = fixate.config.DRIVERS.get(\"PPS\", {})\n\n instruments = filter_connected(fixate.config.INSTRUMENTS or {}, classes)\n if not instruments:\n # All discovery methods for implemented instruments should be called\n discover_visa()\n discover_serial(classes, com_ports=com_ports, baud_rates=baud_rates)\n instruments = filter_connected(fixate.config.INSTRUMENTS or {}, classes)\n # This is where the restrictions would come in\n if instruments:\n for instr in instruments:\n return instruments[instr]\n raise InstrumentNotConnected(\"No valid {} found\".format(\"PPS\"))", "def _rate_dates(self, common_object):\n if common_object.IsKindOf(acm.FCashFlow):\n start_date = common_object.StartDate()\n elif common_object.IsKindOf(acm.FReset):\n start_date = common_object.Day()\n else:\n message = \"Rate dates for {0} object are not defined\".format(\n type(common_object))\n raise ProvisionHandlerError(message)\n\n end_date = acm.Time().DateAddDelta(start_date, 0, 3, 0)\n end_date = self._adjust_to_banking_day(end_date)\n\n return (start_date, end_date)", "def new_instrument(self, instrument_type):\r\n return self.instrument_list[instrument_type](instrument_type,\r\n self.midi_output)", "def update_dates(start_date, end_date, freq):\n if (freq == \"MS\") or (freq == \"M\"):\n try:\n start_date = start_date.split(\"/\")\n end_date = end_date.split(\"/\")\n except AttributeError:\n start_date = [start_date.month, start_date.day, start_date.year]\n end_date = [end_date.month, end_date.day, end_date.year]\n if int(end_date[1]) < 22:\n\n if int(end_date[0]) == 1:\n end_month = 12\n end_year = int(end_date[2]) - 1\n else:\n end_month = int(end_date[0]) - 1\n end_year = end_date[2]\n\n end_date[0] = end_month\n end_date[2] = end_year\n\n start_date = pd.to_datetime(f\"{start_date[0]}/01/{start_date[2]}\")\n\n end_date = pd.to_datetime(\n f\"{end_date[0]}/{calendar.monthrange(int(end_date[2]),int(end_date[0]))[1]}/{end_date[2]}\"\n )\n\n if (freq == \"QS\") or (freq == \"Q\"):\n start_date = (pd.to_datetime(start_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterBegin(\n startingMonth=1\n )\n end_date = (pd.to_datetime(end_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterEnd()\n\n return (start_date, end_date)", "def test_aggr_date_input(self):\n\n actual_start_date = set([])\n actual_end_date = set([])\n for year in self.years:\n for my_date in self.dates:\n input_date = date(year, my_date[0], my_date[1])\n retail_date = RetailDate(input_date)\n actual_start_date.add(retail_date.year_start_date)\n actual_end_date.add(retail_date.year_end_date)\n\n # Verify the retail start dates\n expected_start = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_start_dates])\n diff = expected_start.symmetric_difference(actual_start_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))\n\n # Verify the retail end dates\n expected_end = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_end_dates])\n 
diff = expected_end.symmetric_difference(actual_end_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))", "def test_initial_records_are_preserved(self):\n input_ = [\n self.indicator_record(date=datetime.date(1998, 5, 1), value=0.50),\n self.indicator_record(date=datetime.date(1998, 6, 1), value=0.02),\n self.indicator_record(date=datetime.date(1998, 7, 1), value=-0.12),\n ]\n records = self.expander._ipca_from_15_expander(input_)\n\n same_date_values = [record.date == records[index_].date and\n record.value == records[index_].value\n for index_, record in enumerate(input_)]\n\n self.assertTrue(all(same_date_values))", "def <start>/<end>(<start>/<end>)\ndef calc_temps(start_date, end_date):", "def __init__(self, name, value, start_date, end_date, period, interest): \n SavingPlan.__init__(self, name, value, start_date, end_date, period)\n self.interest = interest", "def is_date_range(self, is_date_range):\n\n self._is_date_range = is_date_range", "def retrieve_observations_and_simsurvey(\n session,\n start_date,\n end_date,\n localization_id,\n instrument_id,\n survey_efficiency_analysis_id,\n survey_efficiency_analysis_type,\n):\n\n if survey_efficiency_analysis_type == \"SurveyEfficiencyForObservations\":\n survey_efficiency_analysis = session.scalars(\n sa.select(SurveyEfficiencyForObservations).where(\n SurveyEfficiencyForObservations.id == survey_efficiency_analysis_id\n )\n ).first()\n if survey_efficiency_analysis is None:\n raise ValueError(\n f'No SurveyEfficiencyForObservations with ID {survey_efficiency_analysis_id}'\n )\n elif survey_efficiency_analysis_type == \"SurveyEfficiencyForObservations\":\n survey_efficiency_analysis = session.scalars(\n sa.select(SurveyEfficiencyForObservationPlan).where(\n SurveyEfficiencyForObservationPlan.id == survey_efficiency_analysis_id\n )\n ).first()\n if survey_efficiency_analysis is None:\n raise ValueError(\n f'No SurveyEfficiencyForObservationPlan with ID {survey_efficiency_analysis_id}'\n )\n else:\n raise ValueError(\n 'survey_efficiency_analysis_type must be SurveyEfficiencyForObservations or SurveyEfficiencyForObservationPlan'\n )\n\n payload = survey_efficiency_analysis.payload\n\n instrument = session.scalars(\n sa.select(Instrument)\n .options(joinedload(Instrument.telescope))\n .where(Instrument.id == instrument_id)\n ).first()\n\n localization = session.scalars(\n sa.select(Localization).where(Localization.id == localization_id)\n ).first()\n\n data = get_observations(\n session,\n start_date,\n end_date,\n telescope_name=instrument.telescope.name,\n instrument_name=instrument.name,\n localization_dateobs=localization.dateobs,\n localization_name=localization.localization_name,\n localization_cumprob=payload[\"localization_cumprob\"],\n )\n\n observations = data[\"observations\"]\n\n if len(observations) == 0:\n raise ValueError('Need at least one observation to run SimSurvey')\n\n unique_filters = list({observation[\"filt\"] for observation in observations})\n\n if not set(unique_filters).issubset(set(instrument.sensitivity_data.keys())):\n raise ValueError('Need sensitivity_data for all filters present')\n\n for filt in unique_filters:\n if not {'exposure_time', 'limiting_magnitude', 'zeropoint'}.issubset(\n set(instrument.sensitivity_data[filt].keys())\n ):\n raise ValueError(\n f'Sensitivity_data dictionary missing keys for filter {filt}'\n )\n\n # get height and width\n stmt = (\n InstrumentField.select(session.user_or_token)\n .where(InstrumentField.id == observations[0][\"field\"][\"id\"])\n 
.options(undefer(InstrumentField.contour_summary))\n )\n field = session.scalars(stmt).first()\n if field is None:\n raise ValueError(\n 'Missing field {obs_dict[\"field\"][\"id\"]} required to estimate field size'\n )\n contour_summary = field.to_dict()[\"contour_summary\"][\"features\"][0]\n coordinates = np.squeeze(np.array(contour_summary[\"geometry\"][\"coordinates\"]))\n coords = SkyCoord(\n coordinates[:, 0] * u.deg, coordinates[:, 1] * u.deg, frame='icrs'\n )\n width, height = None, None\n for c1 in coords:\n for c2 in coords:\n dra, ddec = c1.spherical_offsets_to(c2)\n dra = dra.to(u.deg)\n ddec = ddec.to(u.deg)\n if width is None and height is None:\n width = dra\n height = ddec\n else:\n if dra > width:\n width = dra\n if ddec > height:\n height = ddec\n\n observation_simsurvey(\n observations,\n localization.id,\n instrument.id,\n survey_efficiency_analysis_id,\n survey_efficiency_analysis_type,\n width=width.value,\n height=height.value,\n number_of_injections=payload['number_of_injections'],\n number_of_detections=payload['number_of_detections'],\n detection_threshold=payload['detection_threshold'],\n minimum_phase=payload['minimum_phase'],\n maximum_phase=payload['maximum_phase'],\n model_name=payload['model_name'],\n optional_injection_parameters=payload['optional_injection_parameters'],\n )", "def info_date(source_files: AllSourceFilenames = AllSourceFilenames(),\n out_datefirst: OutputCommonData = OutputCommonData(\"cwb.datefirst\"),\n out_datelast: OutputCommonData = OutputCommonData(\"cwb.datelast\"),\n datefrom: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.datefrom\"),\n dateto: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.dateto\"),\n timefrom: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.timefrom\"),\n timeto: AnnotationAllSourceFiles = AnnotationAllSourceFiles(\"[dateformat.out_annotation]:dateformat.timeto\")):\n first_date = None\n last_date = None\n\n for file in source_files:\n from_dates = sorted((int(x[0]), x[1]) for x in datefrom.read_attributes(file, (datefrom, timefrom)) if x[0])\n if from_dates and (first_date is None or from_dates[0] < first_date):\n first_date = from_dates[0]\n to_dates = sorted((int(x[0]), x[1]) for x in dateto.read_attributes(file, (dateto, timeto)) if x[0])\n if to_dates and (last_date is None or to_dates[-1] > last_date):\n last_date = to_dates[-1]\n\n if not first_date or not last_date:\n raise SparvErrorMessage(\"Corpus is configured as having date information, but no dates were found.\")\n\n # Parse and re-format dates (zero-padding dates with less than 8 digits, needed by strptime)\n first_date_d = datetime.strptime(f\"{str(first_date[0]).zfill(8)} {first_date[1]}\", \"%Y%m%d %H%M%S\")\n first_date_formatted = first_date_d.strftime(\"%Y-%m-%d %H:%M:%S\")\n last_date_d = datetime.strptime(f\"{str(last_date[0]).zfill(8)} {last_date[1]}\", \"%Y%m%d %H%M%S\")\n last_date_formatted = last_date_d.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n out_datefirst.write(first_date_formatted)\n out_datelast.write(last_date_formatted)", "def _setup_account_general(insid, start_date, rate_dict, counterparty,\n prf_name, account_name, reinvest,\n funding_instype, external_id=None):\n calendar = acm.FCalendar['ZAR Johannesburg']\n next_bus_day = calendar.AdjustBankingDays(acm.Time.DateToday(), 1)\n day_after_start_date = calendar.AdjustBankingDays(start_date, 1)\n # Make sure that two conditions are met:\n # 1. 
End date doesn't lie in the past.\n # 2. Start date predates end date.\n end_date = max(next_bus_day, day_after_start_date)\n\n deposit = acm.FInstrument[insid]\n if deposit:\n LOGGER.info(\"The instrument {} already exists\".format(insid))\n if deposit.ExternalId1():\n LOGGER.info(\"Updating the external id from {} to {}\".format(\n deposit.ExternalId1(), external_id))\n deposit.ExternalId1(external_id)\n deposit.Commit()\n return None\n\n LOGGER.info('Creating %s...', insid)\n acm.BeginTransaction()\n try:\n # Instrument\n deposit = acm.FDeposit()\n deposit.Currency(CURRENCY)\n deposit.Name(insid)\n deposit.DayCountMethod(DAY_COUNT_METHOD)\n deposit.SpotBankingDaysOffset(0)\n # this sets the exp_time, which has a higher priority over exp_day,\n # which is set when calling re_rate(...) from ael. If the exp_time\n # is not set, acm (trading manager) uses the exp_day.\n # deposit.ExpiryDate(end_date)\n deposit.ContractSize(1)\n deposit.Quotation('Clean')\n deposit.QuoteType('Clean')\n deposit.OpenEnd('Open End')\n deposit.MinimumPiece(MINIMUM_PIECE)\n deposit.PayOffsetMethod('Business Days')\n if external_id:\n deposit.ExternalId1(external_id)\n\n # Leg\n leg = deposit.CreateLeg(1)\n leg.LegType('Call Fixed Adjustable')\n leg.Decimals(11)\n leg.StartDate(start_date)\n leg.EndDate(end_date)\n leg.EndPeriodUnit('Days')\n leg.DayCountMethod(DAY_COUNT_METHOD)\n if rate_dict['type'] == 'fixed':\n leg.FixedRate(rate_dict['rate'])\n leg.ResetDayOffset(0)\n leg.ResetType('Weighted')\n leg.ResetPeriod('1d')\n leg.ResetDayMethod('Following')\n leg.Currency(CURRENCY)\n leg.NominalFactor(1)\n leg.Rounding('Normal')\n leg.RollingPeriod('1m')\n leg.RollingPeriodBase(acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(\n start_date, 0, 1, 0)))\n leg.PayDayMethod('Following')\n leg.PayCalendar(calendar)\n leg.FixedCoupon(True)\n leg.NominalAtEnd(True)\n leg.FloatRateFactor(1)\n leg.FixedCoupon(True)\n leg.StartPeriod('-1d')\n leg.Reinvest(reinvest)\n if rate_dict['type'] == 'float':\n deposit.AddInfoValue('CallFloatRef', rate_dict['ref'])\n deposit.AddInfoValue('CallFloatSpread', rate_dict['spread'])\n deposit.Commit() # Commits both the instrument and the leg.\n\n # Trade\n trade = acm.FTrade()\n trade.Instrument(deposit)\n trade.Counterparty(counterparty)\n trade.Acquirer('PRIME SERVICES DESK')\n trade.AcquireDay(start_date)\n trade.ValueDay(start_date)\n trade.Quantity(1)\n trade.TradeTime(start_date)\n trade.Currency(CURRENCY)\n trade.Price(0)\n trade.Portfolio(acm.FPhysicalPortfolio[prf_name])\n trade.Type('Normal')\n trade.TradeTime(start_date)\n trade.Status('Simulated') # To allow for delete in case of rollback.\n trade.AddInfoValue('Funding Instype', funding_instype)\n trade.AddInfoValue('Call_Region', 'BB SANDTON')\n trade.AddInfoValue('Account_Name', account_name)\n trade.Commit()\n \n acm.CommitTransaction()\n except Exception as e:\n acm.AbortTransaction()\n LOGGER.exception(\"Could not create call/loan account {}\".format(insid))\n raise e\n\n deposit = acm.FInstrument[insid]\n if deposit:\n trades = deposit.Trades()\n if trades:\n LOGGER.info('The following trade has been created:{}\\n'.format(trades[0].Oid()))\n else:\n raise RuntimeError('Could not create trade!')\n else:\n raise RuntimeError('Could not create deposit!')", "def get_time_series(for_date, instr, spec):\n return acm.FTimeSeries.Select01(\"day = '%s' and recaddr = %i \"\n \"and timeSeriesSpec = %i and runNo = 1\"\n % (for_date, instr.Oid(), spec.Oid()), '')", "def test_integer_params(self):\n test_date = get_by_values(4, 5, 6, 
2016)\n self.assertEquals(test_date, date(2016, 6, 25))", "def interview_date_default(self, interview_date_default):\n\n self._interview_date_default = interview_date_default", "def test_time_series_intraday_date_integer_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='integer')\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n assert type(data.index[0]) == int", "def setup_base_settings(\n self,\n number_of_dates_to_be_generated_per_patient,\n dunning_cycle_length,\n possible_invoice_dates,\n customer_ids,\n dob_range,\n cpt_codes,\n distributions,\n ):\n self.number_of_dates_to_be_generated_per_patient = (\n number_of_dates_to_be_generated_per_patient\n )\n self.dunning_cycle_length = dunning_cycle_length\n self.dates = possible_invoice_dates\n self.customerIds = customer_ids\n self.consecutive = check_consecutive(self.customerIds)\n if self.consecutive == False:\n print(\"Error setting up the object- customerIds aren't consecutive\")\n print(self.customerIds)\n self.dobs = pd.date_range(\n start=dob_range[\"start_dob\"],\n end=dob_range[\"end_dob\"],\n periods=len(self.customerIds),\n ).date # range of valid dates of birth for the patients\n self.CPTCodes = cpt_codes # CPT codes to choose from\n self.invoices = [\n 10000\n ] # first invoice id- other invoices are monotonically increasing i.e. generated by adding one to the previous invoice.\n\n # dictionary used to define the assumptions used in generating the data set\n self.distributions = distributions\n return True", "def __init__(self,start,end,wav,cadence,series='aia.lev1_euv_12s',\n segment='image',email=None,odir=None,\n overwrite=True,max_con=1,dfmt='%Y/%m%/d %H:%M:%S'):\n\n #list of acceptable wavelengths\n self.awavs = [94,131,171,193,211,304,335,1600,1700]\n #list of acceptable segments\n self.asegs = ['image','spike','None']\n #list of acceptable series\n self.asers = ['aia.lev1_uv_24s','aia.lev1_euv_12s','aia.lev1']\n\n\n #check if overwrite flag is set (Default = True)\n if isinstance(overwrite,bool):\n self.overwrite = overwrite\n else:\n sys.stdout.write('overwrite must be a boolean')\n quit()\n\n #check if max_conn is int\n if isinstance(max_con,int):\n self.max_con = max_con\n else:\n sys.stdout.write('max_con must be a boolean')\n quit()\n\n #check segment\n if isinstance(segment,str):\n self.segment = segment\n if self.segment not in self.asegs:\n sys.stdout.write('segment not in acceptable segment list')\n quit()\n else:\n sys.stdout.write('segment must be a string')\n quit()\n\n \n #check series\n if isinstance(series,str):\n self.series = series\n if self.series not in self.asers:\n sys.stdout.write('series not in acceptable series list')\n quit()\n else:\n sys.stdout.write('series must be a string')\n quit()\n\n\n #check output directory\n if isinstance(odir,str):\n self.odir = odir\n elif odir is None:\n self.odir = './'\n else:\n sys.stdout.write('odir must be a string')\n quit()\n\n #check email is string\n if isinstance(email,str):\n self.email = email\n else:\n sys.stdout.write('email must be a string')\n quit()\n\n\n\n #check that start and end are datetime objects\n #make sure datetime formatter is string\n if isinstance(dfmt,str):\n self.dfmt = dfmt\n else:\n sys.stdout.write('datetime 
formatter must be string')\n quit()\n\n #check inserted start time\n if isinstance(start,datetime):\n self.start = start\n elif isinstance(start,str):\n self.start = datetime.strptime(start,dfmt)\n else:\n sys.stdout.write('Start time must be datetime object or formatted string')\n\n\n #check inserted end time\n if isinstance(end,datetime):\n self.end = end\n elif isinstance(end,str):\n self.end = datetime.strptime(end,dfmt)\n else:\n sys.stdout.write('End time must be datetime object or formatted string')\n\n\n #check if cadence is a string\n #if not so convert the cadence \n #assuming it is given in seconds\n if isinstance(cadence,str):\n self.cadence = cadence\n elif isinstance(cadence,(int,float)):\n self.cadence = str(cadence)+'s'\n else:\n sys.stdout.write('Cadence must be a string, integer, or float')\n quit()\n\n #check input wavelength formatting\n #check formatting assuming float or int\n if isinstance(wav,(int,float)):\n self.wav = [int(wav)*u.AA]\n #check to make sure wavelength is allowed\n if int(self.wav.value) not in self.awavs:\n sys.stdout.write('{0:3.0f} not an acceptable wavelength'.format(self.wav.value))\n quit()\n\n #check formatting assuming string \n elif isinstance(wav,str):\n self.wav = [int(wav)*u.AA]\n #check to make sure wavelength is allowed\n if int(self.wav[0].value) not in self.awavs:\n sys.stdout.write('{0:3.0f} not an acceptable wavelength'.format(self.wav.value))\n quit()\n\n #check formatting assuming array\n elif isinstance(wav,(list,np.ndarray)):\n self.wav = []\n for i in wav:\n if isinstance(i,(float,int)):\n self.wav.append(int(i)*u.AA)\n elif isinstance(i,str):\n self.wav.append(int(i)*u.AA)\n #check to make sure wavelength is allowed\n if int(self.wav[-1].value) not in self.awavs:\n sys.stdout.write('{0:3.0f} not an acceptable wavelength'.format(i.value))\n quit()\n\n\n #format input wavelength\n if isinstance(wav,list):\n self.wav = [ int(i)*u.AA for i in wav]\n elif isinstance(wav,(str,int)):\n self.wav = [int(wav)*u.AA]", "def check_required_range(specific=None, begin=None, end=None):\n\n if not specific and not (begin and end):\n raise ValueError('You must pass some form of date filter')\n\n if specific and (begin and end):\n raise ValueError('Cannot pass both a range and specific dates')\n\n if (begin and not end) or (end and not begin):\n raise ValueError(\"Must pass both begin and end for date range\")", "def get_daily_data_from_stooq(ticker_symbol, start_date, end_date):\n # check whether the start_date and end_date are strings\n if isinstance(start_date, str) and isinstance(end_date, str):\n pass\n else:\n raise ValueError(\"Dates passed to the function are not strings!!!\")\n # validate formats of dates passed to the function\n validate_date_format_yyy_mm_dd(start_date)\n print(\"Validation of start_date format result: positive...\")\n validate_date_format_yyy_mm_dd(end_date)\n print(\"Validation of end_date format result: positive...\")\n d_1 = start_date.replace(\"-\", \"\")\n d_2 = end_date.replace(\"-\", \"\")\n temp_url = \"https://stooq.com/q/d/l/?s=\" + ticker_symbol + \"&d1=\" \\\n + d_1 + \"&d2=\" + d_2 + \"&i=d\"\n print(\"Getting data from URL: \", temp_url)\n # try-except block to catch the cases when the ticker symbol is nonexistent\n try:\n data_in = pd.read_csv(temp_url, usecols=['Date', 'Close'],\n parse_dates=[0])\n except ValueError:\n print(\"ValueError occurred! Probably a nonexistent ticker has been\"\n \" passed to the function\")\n except Exception:\n print(\"General error has occurred! 
Please check function arguments...\")\n else:\n # if data is obtained, rename \"Close\" ===> ticker name\n data_in.rename(columns={\"Close\": ticker_symbol}, inplace=True)\n return data_in", "def _get_normal_date(self, args):\n\n func1, func2, func3 = args\n self.assertIsNotNone(func1(20130201, \"20190120\"))\n self.assertIsNotNone(func2(\"2013/02/01\", \"2019-01-20\"))\n self.assertIsNotNone(func3(r\"2013-/\\-02~@-\\/-@~01\",\n pd.to_datetime('2019-01-20')))", "def from_start_date_to_end_date(start, end):\n\n first_canonicalized = start.replace(\" \", \"\").lower()\n second_canonicalized = end.replace(\" \", \"\").lower()\n first_search_date = start.replace(\" \", \"\").lower()\n second_search_date = end.replace(\" \", \"\").lower() \n all_dates_between_start_date_and_end_date = [multiple_dates for multiple_dates in temperature_parameters_list if multiple_dates[\"date\"\n ] >= first_search_date and multiple_dates[\"date\"] <= second_search_date]\n \n if first_search_date == first_canonicalized and second_search_date == second_canonicalized:\n return jsonify(all_dates_between_start_date_and_end_date)\n\n return jsonify({\"error\": f\"{start} and {end} not found.\"}), 404", "def fit_timeseries(xdates, ydata):\n\n pass", "def for_date(self, date):\n return self.get(start_date__lte=date, end_date__gte=date)", "def preprocess_dates(args):\n if 'date' in args:\n if args.get('period') == 'range' and 'end_date' in args:\n args['date'] = '{},{}'.format(args['date'],\n args['end_date'])\n return args", "def __init__(self, stocks=None, start_date='FiveYear', end_date='Today', features=None, verbose=False, capital=0):\n\n # Set default features\n if type(features) is not list:\n features = [features]\n\n if features is None:\n features = []\n\n if DataTypes.ALL in features:\n features = DataTypes.ALL\n\n # set variables for a stock universe\n self.verbose = verbose\n self.stocks = stocks\n self.features = features\n self.stock_data = {}\n end_date = datetime.datetime.today() if end_date == 'Today' \\\n else datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n self.end_date = end_date\n\n start_date = end_date - datetime.timedelta(365 * 5 + 1) if start_date == 'FiveYear' \\\n else datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n self.start_date = start_date\n\n self.date = start_date # initial date that the stock universe is on\n\n # create a list of dates in the YYYY-MM-DD format\n self.str_dates = []\n self.dates = []\n\n self.starting_capital = capital\n self.cash = []\n\n self.collect_all_stock_data()\n self.unique_data = {}\n self.shuffled_data_reset()\n # TODO add ability to order stocks and build a profile having total percent returns as well as capital\n # TODO have ability to select types of data to get fundementals, trends, stock twits anal,\n # TODO ad meter, past prices and volumes, twitter reddit and press releases", "def test_range():\n begin_date = datetime.datetime(2000, 1, 1)\n end_date = datetime.datetime.today()\n\n if os.path.isfile(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\"):\n dates_available = pickle.load(open(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\", \"r\"))\n else:\n prices_available = 
yahoo.webload_symbol_price(\"SPY\", begin_date, end_date)\n dates_available = set(timestamp.to_pydatetime() for timestamp in prices_available.index.tolist())\n pickle.dump(dates_available, open(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\", \"w\"))\n\n dates_expected = set([day for day in itertools.takewhile(\n lambda d: d <= end_date,\n CALENDAR.every_nth_between(begin_date, end_date, 1)\n )])\n\n dates_misaligned = dates_available.symmetric_difference(dates_expected)\n\n assert len(dates_misaligned) == 0", "def test_date_range():\n year = 2012\n cres_m = get_curtailment(year, curt_fn='curtailment.json')[0]\n cres_dr = get_curtailment(year, curt_fn='curtailment_date_range.json')[0]\n for df_res, site in cres_m:\n gid = int(site.name)\n assert np.allclose(df_res['windspeed'], cres_dr[gid]['windspeed'])", "def test_time_series_intraday_date_indexing_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='date')\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n assert type(data.index[0]) == str", "def visitPeriod(self, date):\n raise NotImplementedError()", "def test_second_date_lower(self):\n input_ = (datetime.date(2015, 10, 24), datetime.date(2014, 12, 12))\n with self.assertRaises(ValueError):\n self.expander._get_next_days(*input_)", "def __init__(self, start_date=\"2017-01-01\", end_date=datetime.datetime.now().strftime(\"%Y-%m-%d\"), asset_list=[]):\n\n self.start_date = start_date\n self.end_date = end_date\n self.asset_list = asset_list\n self.portfolio = pd.DataFrame()\n self.benchmark = san.get(\"ohlcv/bitcoin\", from_date=start_date,\n to_date=end_date).closePriceUsd.pct_change()\n\n for portfolio_asset in asset_list:\n self.portfolio[portfolio_asset] = san.get(\"ohlcv/\" + portfolio_asset,\n from_date=start_date,\n to_date=end_date).closePriceUsd.pct_change()\n self.portfolio = self.portfolio.replace([np.inf, -np.inf], 0)\n self.metrics = dict()", "def search_by_date_range(self, tl):\n print(\"Search by date range\")\n dates = input(\"Please use YYYYMMDD-YYYYMMDD for date range: \")\n date1_str, date2_str = dates.split('-')\n try:\n date1 = datetime.datetime.strptime(date1_str, utils.fmt)\n date2 = datetime.datetime.strptime(date2_str, utils.fmt)\n except ValueError as err:\n utils.print_error(err)\n return self.search_by_date_range(tl)\n else:\n return tl.findall_date_range(date1, date2)", "def test_change_of_year(self):\n\n input_ = [\n self.indicator_record(date=datetime.date(2006, 11, 1), value=0.31),\n self.indicator_record(date=datetime.date(2006, 12, 1), value=0.48),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n expected = self.indicator_record(date=datetime.date(2007, 1, 1), value=0.35)\n actual = output[-1]\n\n self.assertEqual(expected, actual)", "def mast_query(instrument, templates, start_date, end_date, aperture=None, detector=None, filter_name=None,\n pupil=None, grating=None, readpattern=None, lamp=None):\n\n # If a single template name is input as a string, put it in a list\n if isinstance(templates, str):\n templates = [templates]\n\n # Make sure instrument is correct case\n instrument = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()]\n\n # instrument_inventory does not allow list inputs to\n # the added_filters input 
(or at least if you do provide a list, then\n # it becomes a nested list when it sends the query to MAST. The\n # nested list is subsequently ignored by MAST.)\n # So query once for each flat template, and combine outputs into a\n # single list.\n query_results = []\n for template_name in templates:\n\n # Create dictionary of parameters to add\n parameters = {\"date_obs_mjd\": {\"min\": start_date, \"max\": end_date},\n \"exp_type\": template_name}\n\n if detector is not None:\n parameters[\"detector\"] = detector\n if aperture is not None:\n parameters[\"apername\"] = aperture\n if filter_name is not None:\n parameters[\"filter\"] = filter_name\n if pupil is not None:\n parameters[\"pupil\"] = pupil\n if grating is not None:\n parameters[\"grating\"] = grating\n if readpattern is not None:\n parameters[\"readpatt\"] = readpattern\n if lamp is not None:\n parameters[\"lamp\"] = lamp\n\n query = instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS,\n add_filters=parameters, return_data=True, caom=False)\n if len(query['data']) > 0:\n query_results.extend(query['data'])\n\n return query_results", "def get_simulate_date(start, end):\n start_year, start_month, start_day = parse_string_datetime(start)\n end_year, end_month, end_day = parse_string_datetime(end)\n if simulatedate_checkinput(start, end) == 0:\n start_time = datetime.datetime(start_year, start_month, start_day)\n end_time = datetime.datetime(end_year, end_month, end_day)\n return start_time, end_time", "def daily_insolation_limits(irrad, clearsky, daily_min=0.4, daily_max=1.25):\n daily_irradiance = _daily_total(irrad)\n daily_clearsky = _daily_total(clearsky)\n good_days = quality.util.check_limits(\n daily_irradiance/daily_clearsky,\n upper_bound=daily_max,\n lower_bound=daily_min\n )\n return good_days.reindex(irrad.index, method='pad', fill_value=False)", "def process_single_date(self, input_filepath):\n # first see if there are already files in the output location\n # (in which case we can skip this date)\n\n # normally the coordinates will be part of the file path\n coords_string = find_coords_string(input_filepath)\n # if not though, we might have coords set explicitly\n if (not coords_string) and \"coords\" in vars(self):\n coords_string = \"{}_{}\".format(self.coords[0],self.coords[1])\n date_string = input_filepath.split(\"/\")[-2]\n if not re.search(\"[\\d]{4}-[\\d]{2}-[\\d]{2}\", date_string):\n if date_range in vars(self):\n date_string = fid_mid_period(self.date_range[0], self.date_range[1])\n else:\n date_String = None\n if not coords_string and date_string:\n raise RuntimeError(\"{}: coords and date need to be defined, through file path or explicitly set\")\n\n output_location = os.path.dirname(self.construct_image_savepath(date_string,\n coords_string))\n if (not self.replace_existing_files) and \\\n self.check_for_existing_files(output_location, self.num_files_per_point):\n return True\n \n print(\"Proceeding.\")\n print(input_filepath)\n print(self.input_location_type)\n \n # If no files already there, proceed.\n filenames = [filename for filename in self.list_directory(input_filepath,\n self.input_location_type) \\\n if filename.endswith(\".tif\")]\n\n # extract this to feed into `convert_to_rgb()`\n band_dict = {}\n for icol, col in enumerate('rgb'):\n band = self.RGB_bands[icol]\n filename = self.get_file(os.path.join(input_filepath,\n \"download.{}.tif\".format(band)),\n self.input_location_type)\n band_dict[col] = {\"band\": band,\n \"filename\": filename\n }\n\n print(filenames)\n tif_filebase = 
os.path.join(input_filepath, filenames[0].split('.')[0])\n\n # save the rgb image\n rgb_ok = self.save_rgb_image(band_dict,\n date_string,\n coords_string)\n if not rgb_ok:\n print(\"Problem with the rgb image?\")\n return False\n\n # save the NDVI image\n ndvi_tif = self.get_file(os.path.join(input_filepath,\n \"download.NDVI.tif\"),\n self.input_location_type)\n ndvi_image = scale_tif(ndvi_tif)\n ndvi_filepath = self.construct_image_savepath(date_string,\n coords_string,\n 'NDVI')\n self.save_image(ndvi_image,\n os.path.dirname(ndvi_filepath),\n os.path.basename(ndvi_filepath))\n\n # preprocess and threshold the NDVI image\n processed_ndvi = process_and_threshold(ndvi_image)\n ndvi_bw_filepath = self.construct_image_savepath(date_string,\n coords_string,\n 'BWNDVI')\n self.save_image(processed_ndvi,\n os.path.dirname(ndvi_bw_filepath),\n os.path.basename(ndvi_bw_filepath))\n\n # split and save sub-images\n self.split_and_save_sub_images(ndvi_image,\n date_string,\n coords_string,\n \"NDVI\")\n\n self.split_and_save_sub_images(processed_ndvi,\n date_string,\n coords_string,\n \"BWNDVI\")\n\n return True", "def get_udis_series(initial_date: str, end_date:str) -> dict:\n\n url = f\"{BANXICO_URL}/{BANXICO_UDIS_SERIE}/datos/{initial_date}/{end_date}\"\n udis_response = _request_handler.get(url, headers=_headers)\n udis_values_per_day = {}\n response = {}\n if udis_response:\n name = udis_response.get(\"bmx\", {}).get(\"series\", [])[0].get(\"titulo\", \"\")\n dates = udis_response.get(\"bmx\", {}).get(\"series\", [])[0].get(\"datos\", \"\")\n if dates:\n for date in dates:\n udis_values_per_day[date.get(\"fecha\", \"\")] = float(date.get(\"dato\"))\n\n max_udi_value = (max(dates, key=lambda x:float(x.get(\"dato\", -1))))\n min_udi_value = (min(dates, key=lambda x:float(x.get(\"dato\", -1))))\n average_udi = float(sum(float(d['dato']) for d in dates)) / len(dates)\n response= {\n \"name\": name,\n \"average_udi_value\": average_udi,\n \"max_udi_value\": {\n \"value\": float(max_udi_value.get(\"dato\", -1)),\n \"date\": max_udi_value.get(\"fecha\", -1)\n },\n \"min_udi_value\":{\n \"value\": float(min_udi_value.get(\"dato\", -1)),\n \"date\": min_udi_value.get(\"fecha\", -1)\n },\n \"dates_udis\": udis_values_per_day\n }\n\n return response\n else:\n return {}", "def add_technical_indicator(df, tic):\n\n df['date'] = df.index\n df = df.reset_index(drop=True)\n cols = ['date'] + [col for col in df if col != 'date']\n df = df[cols]\n\n # drop duplicates\n df = df.drop_duplicates()\n\n # convert Date column to datetime\n df['date'] = pd.to_datetime(df['date'], format = '%Y-%m-%d')\n # df['date'] = pd.to_datetime(df['date'])\n\n # sort by datetime\n df.sort_values(by = 'date', inplace = True, ascending = True)\n\n stock = Sdf.retype(df.copy())\n\n temp_macd = stock['macd']\n temp_macds = stock['macds']\n temp_macdh = stock['macdh']\n macd = pd.DataFrame(temp_macd)\n macds = pd.DataFrame(temp_macds)\n macdh = pd.DataFrame(temp_macdh)\n\n temp_rsi = stock['rsi_6']\n rsi = pd.DataFrame(temp_rsi)\n\n temp_cci = stock['cci']\n cci = pd.DataFrame(temp_cci)\n\n temp_adx = stock['adx']\n adx = pd.DataFrame(temp_adx)\n\n temp_pdi = stock['pdi']\n temp_mdi = stock['mdi']\n pdi = pd.DataFrame(temp_pdi)\n mdi = pd.DataFrame(temp_mdi)\n\n df.insert(len(df.columns), \"daydate\",0)\n df.insert(len(df.columns), \"tic\",tic)\n\n df.insert(len(df.columns), \"macd\",0)\n df.insert(len(df.columns), \"macd_signal_line\",0)\n df.insert(len(df.columns), \"macd_hist\",0)\n\n df.insert(len(df.columns), \"rsi\",0)\n\n 
df.insert(len(df.columns), \"cci\",0)\n\n df.insert(len(df.columns), \"adx\",0)\n\n df.insert(len(df.columns), \"+DI\",0)\n df.insert(len(df.columns), \"-DI\",0)\n\n len_df = len(df)\n\n # CD Comment change the for by optimized solution\n for i in range(0,len_df,1):\n\n df.loc[i,\"daydate\"] = str(df.iloc[i][\"date\"])[0:10]\n\n df.loc[i,\"macd\"] = macd.iloc[i][0]\n df.loc[i,\"macd_signal_line\"] = macds.iloc[i][0]\n df.loc[i,\"macd_hist\"] = macdh.iloc[i][0]\n\n df.loc[i,\"rsi\"] = rsi.iloc[i][0]\n\n df.loc[i,\"cci\"] = cci.iloc[i][0]\n\n df.loc[i,\"adx\"] = adx.iloc[i][0]\n\n df.loc[i,\"+DI\"] = pdi.iloc[i][0]\n df.loc[i,\"-DI\"] = mdi.iloc[i][0]\n\n df['daydate'] = pd.to_datetime(df['daydate'], format = '%Y-%m-%d')\n\n\n cols = ['daydate'] + ['date'] + ['tic'] + [col for col in df if ((col != 'date') and (col != 'daydate') and (col != 'tic'))]\n df = df[cols]\n\n #df = df.replace([np.inf, -np.inf], np.nan).dropna(axis=1)\n df = df.replace([np.inf, -np.inf], np.nan).dropna()\n\n df = df.reset_index(drop=True)\n\n return df", "def select_date_interval_menu():\n while True:\n start_date = input('\\nInput desired start date with format dd-mm-yyyy:\\n')\n try:\n start_date = datetime.strptime(start_date, '%d-%m-%Y')\n break\n except ValueError:\n print('invalid start date selected')\n while True:\n end_date = input('\\nInput desired start date with format dd-mm-yyyy,\\nor hit enter to select todays date\\n')\n if end_date == '':\n end_date = date.today()\n break\n else:\n try:\n end_date = datetime.strptime(end_date, '%d-%m-%Y')\n break\n except ValueError:\n print('invalid end date selected')\n list_of_dates = pd.date_range(start_date, end_date, freq='d')\n list_of_dates = [i.strftime('%d%m%Y') for i in list_of_dates]\n return list_of_dates", "def _restricted_dates(date):\n _dates = list(date)\n try:\n return_date = datetime.strptime(date, '%Y-%m-%d').date()\n # end_date = datetime.strptime(dates[1], '%Y-%m-%d').date()\n except ValueError:\n raise argparse.ArgumentTypeError(\n f\"Could not parse dates. Did you format them yyyy-mm-dd? 
Dates received:\\n{date}\")\n\n # if start_date > end_date:\n # raise argparse.ArgumentTypeError(\n # f\"Start date {start_date} may not be later than end date {end_date}\")\n # return [start_date, end_date, 55]\n return return_date", "def filter_on_date(self, start, end, dataframe, datecol=\"datetime\"):\n return dataframe.loc[(dataframe[datecol] < end) & (dataframe[datecol] > start)]", "def RisetimeFinder(X, Y,startIndex,peakIndex,baseline):\n # Channel1Data is from first TOF\n # Channel2Data is from second TOF\n hitAmplitude = Y[peakIndex]\n UpperThreshold = baseline - (.7 * (baseline - hitAmplitude))\n LowerThreshold = baseline - (.3 * (baseline - hitAmplitude))\n riseTimestart = 0\n riseTimeend = 0\n riseIndex = 0\n fallIndex = 0\n diffs = Y[startIndex:peakIndex]-UpperThreshold\n value = np.min(abs(diffs))\n noiserms = np.std(Y[:50])*5\n YStart = Y[startIndex]\n YSign =np.sign(Y[startIndex])\n #print(value,diffs)\n #print(np.where(value == abs(diffs))[0][0])\n riseIndex = int(np.where(value == abs(diffs))[0][0]) + startIndex\n diffs = Y[startIndex:peakIndex]-LowerThreshold\n value = np.min(abs(diffs))\n fallIndex = int(np.where(value == abs(diffs))[0][0]) + startIndex\n riseTimestart = Interpolator(X, Y, riseIndex-1,riseIndex+1,UpperThreshold)\n riseTimeend = Interpolator(X, Y, fallIndex-1,fallIndex+1,LowerThreshold)\n #print(UpperThreshold,LowerThreshold)\n result = dict()\n result['risetime'] = riseTimestart-riseTimeend\n result['starttime'] = riseTimeend\n if riseTimestart < X[startIndex] or riseTimestart > X[EndIndex] or riseTimeend < X[startIndex] or riseTimeend > X[EndIndex]:\n result['risetime']= False\n if riseTimestart - riseTimeend > (X[EndIndex] - X[startIndex]):\n result['risetime']= False\n if riseTimestart - riseTimeend <= 0:\n result['risetime']= False\n if riseIndex == 0 or fallIndex ==0:\n result['risetime']= False\n if YSign > 0:\n if(YStart > baseline + noiserms):\n result['risetime']= False\n if YSign < 0:\n if(YStart < baseline - noiserms):\n result['risetime']= False\n if len(np.unique(np.sign(np.diff(Y[fallIndex:startIndex])))) > 1:\n result['risetime']= False\n\n return result", "def date_search(data, start_date, end_date):\n # change dates for date search\n data['timestamp'] = pd.to_datetime(data['timestamp']).dt.date\n d1 = datetime.datetime.strptime(f'{start_date}', '%Y-%m-%d').date()\n d2 = datetime.datetime.strptime(f'{end_date}', '%Y-%m-%d').date()\n\n # constrict data by date search parameters\n less_data = data[(data['timestamp'] >= d1) & (data['timestamp'] <= d2)]\n\n return less_data", "def sinterp(date, lastextremedate, deltatonext, low, hub, rising=True, verbose=False):\n # offset from date to the last extreme date\n deltatolast = date-lastextremedate\n # the factor from the date to pi scale\n factor = deltatolast.seconds/float(deltatonext.seconds)\n # the mapping into the pi scale\n x = np.pi*factor\n # rising(True) or falling slope(False)\n if rising:\n phase = -1\n else:\n phase = 1\n # return the interpolation with sinus flank\n result = (np.sin(x+phase*(.5*np.pi))+1)*.5*hub+low\n if verbose:\n print 'interpolating'\n print ' date:', date\n print ' deltatolast:', deltatolast\n print ' deltatonext:', deltatonext \n print ' factor:', factor\n print ' x:', x\n print ' result:', result\n return result", "def start_end(start_date,end_date):\n\n session = Session(engine)\n\n # Query from database full temp results for dates range\n temp_results = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n 
filter(measurement.date >= start_date).\\\n filter(measurement.date <= end_date).all()\n \n session.close() \n \n return jsonify(temp_results)", "def gains_btw_dates(self, date_ini='Ini', date_fin='today', pct=False):\n assert date_fin == 'today' or isinstance(date_fin, date), 'Error! You have to pass a datetime.date istance to date parameters.'\n assert date_ini == 'Ini' or isinstance(date_ini, date), 'Error! You have to pass a datetime.date istance to date parameters.'\n assert isinstance(pct, bool), 'Error! The pct parameter must be boolean.'\n if date_fin == 'today':\n date_fin = self.data.index[-1]\n if date_ini == 'Ini':\n date_ini = self.data.index[0]\n assert date_ini >= self.data.index[0], 'Error ! Invalid Initial Date'\n assert date_fin >= self.data.index[0], 'Error ! Invalid Final Date'\n date_fin = self._first_good_date(date_fin)\n if date_ini == self.data.index[0]:\n profit = self.data.loc[date_fin, 'Profit/Loss']\n else:\n #date_ini = self._first_good_date(self._first_good_date(date_ini) - timedelta(1))\n date_ini = self._first_good_date(date_ini - timedelta(1))\n profit = self.data.loc[date_fin, 'Profit/Loss'] - self.data.loc[date_ini, 'Profit/Loss']\n if pct:\n return round(profit / self.value(date_ini) * 100, 2)\n else:\n return round(profit, 2)", "def set_date_range(self, range):\n\n date_range_string = \"{0} to {1}\".format(\n self.fmt_date(range.start_date), self.fmt_date(range.end_date)\n )\n log.info(\"Specifying a date range of: {0}\".format(date_range_string))\n\n # Enter the specified date range\n selector = self._driver.find_element_by_css_selector(self.DateRangeSelector)\n selector.clear()\n selector.send_keys(date_range_string)", "def fill_missing_date_range():\n pickle_dir ='/misc/yoda/www/plots/user/sheep'\n #pickle_dir = '/Users/ken/Downloads/sheep'\n drange = get_missing_date_range(pickle_dir)\n if drange:\n print 'fill date range', drange\n pickle_date_range(drange[0], drange[1])", "def daily_new_cases(start_date, end_date, lga_name, columnname_lga = COLUMN_LGA, columnname_cases = COLUMN_CASES, excel_file_name = EXCEL_FILE_NAME):\n\n start_date = dt.strptime(start_date, '%m-%d') \n end_date = dt.strptime(end_date, '%m-%d')\n \n # data to be drawn in the line graph\n x_date = []\n y_cases = []\n\n with open(EXCEL_FILE_NAME, 'rb') as f:\n df = pd.read_excel(f, index=False)\n\n # keep updating current_date in for-loop to find the right column in excel\n # change data type to str and parse index 5-9 to get the format of 'mm-dd'\n current_date = str(start_date)[5:10]\n no_of_days = end_date - start_date\n for i in range (no_of_days.days + 1):\n column_name = columnname_cases + current_date\n\n x_date.append(current_date)\n y_cases.append(int(df[column_name].loc[df[columnname_lga] == lga_name].to_list()[0]))\n\n current_date = dt.strptime(current_date, '%m-%d') + td(days=1) \n current_date = str(current_date)[5:10]\n\n data = []\n data.append(x_date)\n data.append(y_cases)\n return data" ]
[ "0.6696412", "0.5244132", "0.5244106", "0.5234904", "0.523354", "0.5202343", "0.50902385", "0.49597186", "0.4926985", "0.49259138", "0.4911848", "0.48814812", "0.4860115", "0.48596224", "0.48520213", "0.48501316", "0.4843819", "0.4843819", "0.48264506", "0.48252285", "0.4812543", "0.4811632", "0.48099044", "0.48043033", "0.4801722", "0.47961712", "0.4783006", "0.47822672", "0.47802654", "0.47764415", "0.47752887", "0.47666857", "0.4757027", "0.47531044", "0.47457927", "0.47247747", "0.47090685", "0.47060227", "0.469961", "0.46984807", "0.4692947", "0.468458", "0.4666626", "0.46559584", "0.46498576", "0.46483973", "0.46470296", "0.46435875", "0.46370724", "0.46270734", "0.46241543", "0.4619045", "0.46097594", "0.46057907", "0.4603669", "0.46033394", "0.46008375", "0.4599217", "0.45961764", "0.45888522", "0.4586701", "0.4586157", "0.45855194", "0.4582703", "0.45801434", "0.45790014", "0.457532", "0.45638058", "0.4559981", "0.4554444", "0.45521325", "0.45498943", "0.45473754", "0.4539654", "0.4536391", "0.45337883", "0.45330828", "0.45280805", "0.452349", "0.45209008", "0.4517137", "0.45155352", "0.45146286", "0.45124397", "0.45109025", "0.45106593", "0.45101485", "0.45097753", "0.45081607", "0.45074543", "0.45033807", "0.45026875", "0.45003524", "0.44982374", "0.44907528", "0.4487667", "0.44867235", "0.44864568", "0.44840378", "0.4481719" ]
0.707812
0
Choose the correct instrument to use for observations for a given date range, based on the primary instrument for that time period. Inputs must be date objects from the datetime module.
def choose_prime_inst(given_start_date,given_end_date): #extracting primary dates where instruments are active from csv file inst_prime_dates = pd.read_csv(ref_path / 'GOES_primary_assignments.csv', header=3) #figuring out which instrument is primary for given start date for d in range(len(inst_prime_dates['Start Date'])): change_date = parse(inst_prime_dates['Start Date'][d]) if given_start_date >= change_date.date(): prime_inst = inst_prime_dates['EPEAD Primary'][d] backup_inst = inst_prime_dates['EPEAD Secondary'][d] end_date = parse(inst_prime_dates['Start Date'][d+1]).date() #if no prime instrument available, have to choose which instrument #to use based on which instruments have data for this date if str(prime_inst) == 'nan': if str(backup_inst) == 'nan': print('no information about primary instrument available.' 'Choosing instrument based on active date ranges') alternate_output = choose_inst(given_start_date,given_end_date) return(alternate_output) else: prime_inst = backup_inst break prime_inst = str(prime_inst).split('.')[0] #reformatting instrument name if len(prime_inst) == 2: inst_str = str(prime_inst) elif len(prime_inst) == 1: inst_str = '0' + str(prime_inst) print('GOES-%s is the primary instrument for given start time' %inst_str) #checking to make sure this primary instrument actually has data year = str(given_start_date).split('-')[0] month = str(given_start_date).split('-')[1] url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' + month + '/goes' + inst_str) try: request.urlopen(url) print('GOES-%s has data available' %inst_str) instrument = 'GOES-' + inst_str print('we are using %s as our instrument for observations' %instrument) except request.HTTPError: #if primary instrument doesn't have data for this date, using backup instrument print('GOES-%s does NOT have data available' %inst_str) #reformatting backup instrument if len(str(backup_inst)) == 2: inst_str = str(backup_inst) elif len(str(backup_inst)) ==1: inst_str = '0' + str(backup_inst) print('checking for data from backup instrument GOES-%s' %inst_str) url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' + month + '/goes' + inst_str) #checking to see if backup instrument has data for this date, if not have #to manually choose which instrument to use based off which instruments #have data available try: request.urlopen(url) print('backup instrument data found - using backup instrument') instrument = 'GOES-' + inst_str print('we are using %s as our instrument for observations' %instrument) except request.HTTPError: print('no knowledge of backup or primary instrument - choosing ' 'instrument based on available data') alternate_output = choose_inst(given_start_date,given_end_date) return(alternate_output) return([instrument,end_date])
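A minimal usage sketch of the document function above (illustrative only, not part of the dataset record): the chosen dates are hypothetical, and it assumes the GOES_primary_assignments.csv reference file is present, network access to the NOAA archive is available, and pandas/dateutil are importable as in the function body.

from datetime import date

# Hypothetical observation window; any pair of datetime.date objects is valid input.
window_start = date(2012, 1, 23)
window_end = date(2012, 1, 30)

# choose_prime_inst returns a two-element list: the chosen instrument name and
# the date until which that primary assignment remains in effect.
instrument, usable_until = choose_prime_inst(window_start, window_end)
print('Using %s for observations (primary assignment valid until %s)' % (instrument, usable_until))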
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_inst(given_start_date,given_end_date): #INPUTS MUST BE DATE OBJECTS\r\n\r\n inst_start_dates=[]\r\n inst_end_dates=[]\r\n good_instruments = []\r\n good_end_dates = []\r\n bad_inst = []\r\n\r\n #extracting dates where instruments are active from csv file\r\n inst_dates = pd.read_csv(ref_path / 'instrument_dates.csv')\r\n\r\n for s in inst_dates['start']:\r\n inst_start_dates.append(datetime.strptime(str(s),'%Y-%m').date())\r\n\r\n for e in inst_dates['end']:\r\n if str(e) == 'nan':\r\n inst_end_dates.append(datetime.today().date())\r\n else:\r\n inst_end_dates.append(datetime.strptime(str(e),'%Y-%m').date())\r\n\r\n #checking which instruments are active during given time period and\r\n #choosing the correct ones\r\n print('checking which instruments are active for given dates')\r\n\r\n for i in range(len(inst_start_dates)):\r\n if (inst_start_dates[i] < given_start_date) and (given_end_date <\r\n inst_end_dates[i]):\r\n print('%s works' %inst_dates['Instrument'][i])\r\n good_instruments.append(inst_dates['Instrument'][i])\r\n good_end_dates.append(inst_end_dates[i])\r\n else:\r\n print('outside of %s range' %inst_dates['Instrument'][i])\r\n\r\n #checking if active instruments actually have data for that date\r\n for inst in good_instruments:\r\n inst_str = inst.replace('-','').lower()\r\n year = str(given_start_date).split('-')[0]\r\n month = str(given_start_date).split('-')[1]\r\n url = ('https://satdat.ngdc.noaa.gov/sem/goes/data/avg/'+ year + '/' +\r\n month + '/' + inst_str)\r\n\r\n try:\r\n request.urlopen(url)\r\n print('%s data available' %inst)\r\n\r\n except:\r\n print('%s data NOT available' %inst)\r\n bad_inst.append(inst)\r\n\r\n #not choosing instrument if it doesn't have data\r\n for binst in bad_inst:\r\n good_instruments.remove(binst)\r\n\r\n #if more than one instrument is available, choose which one to use\r\n if len(good_instruments) > 1:\r\n print('Please choose which instrument you would like to use.')\r\n\r\n for j in range(len(good_instruments)):\r\n print('Type ' + str(j) + ' for ' + str(good_instruments[j]))\r\n\r\n inst_choice = input('Answer:' )\r\n\r\n instrument = good_instruments[int(inst_choice)]\r\n end_date = good_end_dates[int(inst_choice)]\r\n\r\n print('we are using %s as our instrument for observations' %instrument)\r\n\r\n else:\r\n\r\n instrument = good_instruments[0]\r\n end_date = good_end_dates[0]\r\n print('we are using %s as our instrument for observations' %instrument)\r\n\r\n return([instrument,end_date])", "def __init__(self, code, start_date=\"1900-01-01\", end_date=\"2020-01-01\"):\n base = Base()\n self.datas = base.getData(\n code=code, start_date=start_date, end_date=end_date)\n self._index = 0\n self.period = 14", "def database_extraction(mod_start_time,mod_end_time,instrument_chosen,subevent_bool,\r\n detect_previous_event = False,thresholds='100,1',\r\n one_thresh = False):\r\n obs_file_created = False\r\n\r\n #extending time window\r\n window_end_time = (mod_end_time + timedelta(days=2))\r\n window_start_time = (mod_start_time - timedelta(days=2))\r\n \r\n #making a list of all dates within window\r\n day_list=[]\r\n for d in range(10):\r\n day_list.append((window_start_time + timedelta(days=d)).date())\r\n print('day list = %s' %day_list)\r\n \r\n print('determining if an instrument has been chosen')\r\n\r\n if instrument_chosen:\r\n #if an instrument has been chosen, checking to make sure it still works for this date\r\n if inst_end < window_end_time:\r\n instrument_chosen = False\r\n else:\r\n #if insturment hasn't 
been chosen, figuring out what it should be for given date\r\n try:\r\n #if instrument is specified in cfg using that\r\n instrument = cfg.instrument\r\n inst_end = datetime.today()\r\n print('using %s as our instrument for observations' %instrument)\r\n instrument_chosen = True\r\n\r\n except:\r\n #choosing instrument using function if not given in cfg\r\n instrument_stuff = choose_prime_inst(window_start_time.date(),\r\n window_end_time.date())\r\n instrument = instrument_stuff[0]\r\n #figuring out how long we can use this instrument\r\n inst_end = instrument_stuff[1]\r\n instrument_chosen = True\r\n \r\n #running katie's code to extract data using chosen instrument and dates\r\n print('extracting data from GOES website')\r\n \r\n #running for only one threshold if one_thresh is true, otherwise running for default\r\n #thresholds as well as any additional threshold given\r\n if one_thresh:\r\n one_sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds) \r\n print('ran for threshold %s' %thresholds)\r\n else:\r\n if subevent_bool:\r\n thresholds = '10,1'\r\n #if event is a subevent, changing the threshold in katie's code to\r\n #10 MeV > 1pfu so that it will be recorded\r\n print('********************SUBEVENT**************************')\r\n sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n print('ran for subevent')\r\n else:\r\n #if an event, running with usual thresholds\r\n print('********************EVENT*****************************')\r\n sep.run_all(str(window_start_time), str(window_end_time),str(instrument), \r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n \r\n #reloading function so it doesn't keep old data \r\n reload(sep)\r\n \r\n #reformatting csv created from katie's code to json\r\n print('extracted - reformatting') \r\n for day in day_list: \r\n if not obs_file_created:\r\n #checking each day within the window to find the csv file if it hasn't\r\n #already been found\r\n print('thresholds: %s' %thresholds)\r\n \r\n if one_thresh:\r\n #name includes threshold if only ran for one threshold\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_gt' +\r\n str(thresholds).split(',')[0] + '_' + str(thresholds).split(',')[1] + 'pfu_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n else:\r\n #otherwise only includes date ran for\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n \r\n print('new_os_name %s' %new_obs_name) \r\n \r\n #checking if that file exists\r\n if os.path.exists(katies_path / new_obs_name):\r\n #if a file with this date exists, creating the corresponding json file\r\n \r\n #json name\r\n if one_thresh:\r\n obs_name = (str(instrument) + '_' + str(day) + 'only_' + str(thresholds).split(',')[0] + 'MeV_event.json')\r\n else:\r\n obs_name = (str(instrument) + '_' +\r\n str(day) + '.json')\r\n #creating json file\r\n obs_csv2json((katies_path / new_obs_name), obs_name,\r\n (ref_path/'example_sepscoreboard_json_file_v20190228.json'),\r\n instrument)\r\n \r\n print('obs file created')\r\n #file is created - will not run for anymore dates within window\r\n obs_file_created = True\r\n \r\n return(obs_name)\r\n else:\r\n print('no csv file found with this date, checking next one')", "def test_date_interval(self, init_date, end_date):\n 
self.calc_earning(self.security[(self.security['Date'] > init_date) &\n (self.security['Date'] < end_date)])", "def date_match(self,dateRange, input_frame):\n # find match with a exact date, output one element\n if dateRange['Start Date'] == dateRange['End Date']:\n # convert dtype to datetime64, match the data in dataframe\n exact_date = np.datetime64(dateRange['Start Date'])\n # key = column name, value = keyword\n target_time = input_frame[input_frame['Start Date'] == exact_date]\n # if search a range\n else:\n # only a start date or an end date entered\n if dateRange['Start Date'] == '' or dateRange['End Date'] == '':\n # only a start date input, then return the data from entered date to most recent\n if dateRange['End Date'] == '':\n start = np.datetime64(dateRange['Start Date'])\n target_time = input_frame[input_frame['Start Date'] >= start]\n # only an ende date input, then return the data before the entered date\n else:\n end = np.datetime64(dateRange['End Date'])\n target_time = input_frame[input_frame['Start Date'] <= end]\n # convert datatype to datetime64, match the data in dataframe\n else:\n start = np.datetime64(dateRange['Start Date'])\n end = np.datetime64(dateRange['End Date'])\n # mask target_time\n target_time = input_frame[(input_frame['Start Date'] <= end) & (input_frame['Start Date'] >= start)]\n # return filtered dataframe\n return target_time", "def rate_between(self, from_date, to_date):\n print(\"override the above\")", "def returnDatesAndRegions(start=None, end=None, theRegs=None, isWeekly=False, isViral=False):\r\n\t# Default values\r\n\tregions = [\"global\", \"ad\", \"ar\", \"at\", \"au\", \"be\", \"bg\", \"bo\", \"br\", \"ca\", \"ch\", \"cl\", \"co\", \"cr\", \"cy\", \"cz\", \"de\", \"dk\", \"do\", \"ec\", \"ee\", \"es\", \"fi\", \"fr\", \"gb\", \"gr\", \"gt\", \"hk\", \"hn\", \"hu\", \"id\", \"ie\", \"il\", \"is\", \"it\", \"jp\", \"lt\", \"lu\", \"lv\", \"mc\", \"mt\", \"mx\",\"my\", \"ni\", \"nl\", \"no\", \"nz\", \"pa\", \"pe\", \"ph\", \"pl\", \"pt\", \"py\", \"ro\", \"se\", \"sg\", \"sk\", \"sv\", \"th\", \"tr\", \"tw\", \"us\", \"uy\", \"vn\"]\r\n\tviralWeeklyStart = \"2017-01-05\"\r\n\ttopWeeklyStart = \"2016-12-22\"\r\n\tallDailyStart = \"2017-01-01\"\r\n\r\n\t#Required since dates taken are very specific\r\n\tdefaultList = defaultListOfDates(isWeekly, isViral)\r\n\t#--------------------------------------------\r\n\r\n\t# Helper for Exception handling\r\n\tif(isWeekly and isViral):\r\n\t\tfunc = \"viral50Weekly\"\r\n\telif(isWeekly and not isViral):\r\n\t\tfunc = \"top200Weekly\"\r\n\telif(not isWeekly and isViral):\r\n\t\tfunc = \"viral50Daily\"\r\n\telif(not isWeekly and not isViral):\r\n\t\tfunc = \"top200Daily\"\r\n\t# \r\n\r\n\t# Start dates\r\n\tif(start is None): #From the beginning\r\n\t\tif(isWeekly):\r\n\t\t\tif(isViral):\r\n\t\t\t\tstart = datetime.datetime.strptime(viralWeeklyStart, \"%Y-%m-%d\")\r\n\t\t\telse:\r\n\t\t\t\tstart = datetime.datetime.strptime(topWeeklyStart, \"%Y-%m-%d\") \r\n\t\telse:\r\n\t\t\tstart = datetime.datetime.strptime(allDailyStart, \"%Y-%m-%d\")\r\n\telse:\r\n\t\tif(start in defaultList):\r\n\t\t\tstart = datetime.datetime.strptime(start, \"%Y-%m-%d\")\r\n\t\telse:\r\n\t\t\torderedList = sorted(defaultList, key=lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\") - datetime.datetime.strptime(start, \"%Y-%m-%d\"))\r\n\t\t\tclosest = [d for d in orderedList if d >= start]\r\n\t\t\tsuggest = closest[0:5]\r\n\t\t\tlogger.info(f\"The start date {start} provided for {func} is invalid. Wanna give one these a try? 
{suggest}\")\r\n\t\t\tchoice = input(\"Enter (1) to use the first suggestion, or (2) to quit and set yourself: \")\r\n\t\t\tif(int(choice) == 1):\r\n\t\t\t\tstart = datetime.datetime.strptime(suggest[0], \"%Y-%m-%d\")\r\n\t\t\telif(int(choice) == 2):\r\n\t\t\t\tsys.exit()\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(\"Invalid Choice.\")\r\n\r\n\r\n\t# End dates\r\n\tif(end is None): #Up to now\r\n\t\tend = datetime.datetime.now()\r\n\telse:\r\n\t\tend = datetime.datetime.strptime(end, \"%Y-%m-%d\")\r\n\t\t\r\n\r\n\t# Region\r\n\tregion = []\r\n\tif(theRegs is None):\r\n\t\tregion = regions\r\n\telse:\r\n\t\tif(type(theRegs) is not list):\r\n\t\t\tregs = []\r\n\t\t\tregs.append(theRegs)\r\n\t\t\ttheRegs = regs\r\n\t\t\t\r\n\t\tfor aReg in theRegs:\r\n\t\t\tif(aReg in regions):\r\n\t\t\t\tregion.append(aReg)\r\n\t\t\telse:\r\n\t\t\t\traise FyChartsException(f\"Data for the region --> {aReg} <-- requested for {func} does not exist. Please try another region\")\r\n\r\n\t#Generate list of dates\r\n\tdates = [] \r\n\tif(isWeekly): \r\n\t\tif(isViral):\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\t\telse:\r\n\t\t\tgen = [start + datetime.timedelta(weeks=x) for x in range(0, (end-start).days+1)]\r\n\t\t\tfor date in gen:\r\n\t\t\t\tif(date<end):\r\n\t\t\t\t\tdt = date + datetime.timedelta(days=0)\r\n\t\t\t\t\tdates.append(dt.strftime(\"%Y-%m-%d\"))\r\n\r\n\telse:\r\n\t\tgen = [start + datetime.timedelta(days=x) for x in range(0, (end-start).days+1)]\r\n\t\tfor date in gen:\r\n\t\t\tif(date<=end):\r\n\t\t\t\tdates.append(date.strftime(\"%Y-%m-%d\"))\r\n\r\n\tvar = {\"dates\": dates, \"region\": region}\r\n\treturn var", "def _get_output_date_range_for(self, from_input_dt, to_input_dt):\n return from_input_dt, to_input_dt", "def _get_input_date_range_for(self, from_output_dt, to_output_dt):\n # If comb is adaptive, the required input date range needs to account for the time window\n if self.is_adaptive:\n if from_output_dt is None:\n return from_output_dt, to_output_dt\n return from_output_dt-timedelta(days=self.time_window), to_output_dt\n # Otherwise, the comb is already trained and does not need to fill up the time window first\n return from_output_dt, to_output_dt", "def get_interest_variable(\n in_dataset, sensor_var, date_col, hr_col, numeric_var, target_sensor=\"A620\"\n):\n dataset_pproc = in_dataset.loc[\n in_dataset[sensor_var] == target_sensor, [date_col, hr_col] + [numeric_var]\n ]\n hrs_str = dataset_pproc[hr_col].to_string()\n dates_str = dataset_pproc[date_col]\n\n dataset_pproc[date_col] = pd.to_datetime(dataset_pproc[date_col])\n dataset_pproc.set_index([date_col, hr_col], inplace=True)\n dataset_pproc.fillna(method=\"ffill\", inplace=True)\n dataset_pproc.interpolate(method=\"linear\", axis=0)\n\n return dataset_pproc", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += '-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return 
date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def define_secdate(self):\r\n \r\n # Since 2017\r\n self.start_date = datetime.datetime(2017,1,1) + (datetime.datetime(2017,12,31) - datetime.datetime(2017,1,1))/2 \r\n self.end_date = datetime.datetime(2050,1,1)\r\n self.ktime = (self.end_date - self.start_date).days + 1\r\n self.date = np.zeros(self.ktime,dtype=datetime.datetime)\r\n self.t = np.zeros(self.ktime)\r\n self.dt = 1/365.25\r\n \r\n for k in range(0,self.ktime):\r\n \r\n self.date[k] = self.start_date + datetime.timedelta(days=self.t[k]*365.25)\r\n\r\n if k < self.ktime-1:\r\n \r\n self.t[k+1] = self.t[k] + self.dt\r\n \r\n # Since 1990\r\n self.start_date_hist = datetime.datetime(1990,1,1) + (datetime.datetime(1990,12,31) - datetime.datetime(1990,1,1))/2 \r\n self.ktime_1990_2050 = (self.end_date - self.start_date_hist).days + 1\r\n self.date_1990_2050 = np.zeros(self.ktime_1990_2050,dtype=datetime.datetime)\r\n self.t_1990_2050 = np.zeros(self.ktime_1990_2050)\r\n \r\n for k in range(0,self.ktime_1990_2050):\r\n \r\n self.date_1990_2050[k] = self.start_date_hist + datetime.timedelta(days=self.t_1990_2050[k]*365.25)\r\n \r\n if (self.date_1990_2050[k].year == self.start_date.year and self.date_1990_2050[k].month == self.start_date.month and self.date_1990_2050[k].day == self.start_date.day):\r\n \r\n self.ktime_proj_crossing = k\r\n \r\n \r\n if k < self.ktime-1:\r\n \r\n self.t_1990_2050[k+1] = self.t_1990_2050[k] + self.dt \r\n \r\n return", "def update_dates(start_date, end_date, freq):\n if (freq == \"MS\") or (freq == \"M\"):\n try:\n start_date = start_date.split(\"/\")\n end_date = end_date.split(\"/\")\n except AttributeError:\n start_date = [start_date.month, start_date.day, start_date.year]\n end_date = [end_date.month, end_date.day, end_date.year]\n if int(end_date[1]) < 22:\n\n if int(end_date[0]) == 1:\n end_month = 12\n end_year = int(end_date[2]) - 1\n else:\n end_month = int(end_date[0]) - 1\n end_year = end_date[2]\n\n end_date[0] = end_month\n end_date[2] = end_year\n\n start_date = pd.to_datetime(f\"{start_date[0]}/01/{start_date[2]}\")\n\n end_date = pd.to_datetime(\n f\"{end_date[0]}/{calendar.monthrange(int(end_date[2]),int(end_date[0]))[1]}/{end_date[2]}\"\n )\n\n if (freq == \"QS\") or (freq == \"Q\"):\n start_date = (pd.to_datetime(start_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterBegin(\n startingMonth=1\n )\n end_date = (pd.to_datetime(end_date) + pd.tseries.offsets.DateOffset(days=1)) - pd.offsets.QuarterEnd()\n\n return (start_date, end_date)", "def date_range(start, end):\n \"\"\"between the start and end date inclusive.\"\"\"\n # Create a link to the session\n session = Session(engine)\n \n # Get the start and end date of the data\n final_date = 
session.query(Measurements.date).order_by(Measurements.date.desc()).first()[0]\n first_date = session.query(Measurements.date).order_by(Measurements.date.asc()).first()[0]\n \n # Make sure dates are in range of available data\n if (start > final_date) or (start < first_date) or (end > final_date) or (end < first_date) or (start>end):\n return f\"{start} - {end} is not a proper date range.</br>Try dates between {first_date} - {final_date}\"\n\n # Query the min, avg, and max temps for the given timeframe\n results = []\n while start <= end:\n min_temp = session.query(func.min(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n avg_temp = session.query(func.avg(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n max_temp = session.query(func.max(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n \n # Store the information retrieved\n results.append([start, min_temp, avg_temp, max_temp])\n \n # Update the date to check the next record\n date1 = start.split(\"-\")\n date1 = dt.date(int(date1[0]), int(date1[1]), int(date1[2])) + dt.timedelta(days=1)\n start = date1.strftime(\"%Y-%m-%d\")\n\n session.close()\n\n # Create a dictionary from the query results\n date_temps = []\n for date, min_temp, avg_temp, max_temp in results:\n date_temps_dict = {}\n date_temps_dict[\"date\"] = date\n date_temps_dict[\"min_temp\"] = min_temp\n date_temps_dict[\"avg_temp\"] = round(avg_temp, 2)\n date_temps_dict[\"max_temp\"] = max_temp\n date_temps.append(date_temps_dict)\n \n return jsonify(date_temps)", "def to_stock_data_range(self, start_date=None, end_date=None):\n # standardize dates\n if end_date is None:\n end_date = self.dates[-2]\n if type(end_date) is pd.tslib.Timestamp:\n end_date = end_date.strftime(\"%Y-%m-%d\")\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n try:\n end_date = self.dates[list(self.dates).index(end_date) + 1]\n except:\n end_date = \"Last\"\n\n if start_date is None:\n start_date = self.dates[0]\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n if end_date is \"Last\":\n dates = list(self.dates)[list(self.dates).index(start_date):]\n else:\n dates = list(self.dates)[list(self.dates).index(start_date):list(self.dates).index(end_date)]\n\n # find functions to set\n dataframes = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is pd.DataFrame]\n dictionaries = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is dict]\n constant_values = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and getattr(self, i) is not None and i not in dataframes and i not in dictionaries]\n\n # transfer new data\n new_stock_data = StockData()\n\n for i in constant_values:\n setattr(new_stock_data, i, getattr(self, i))\n\n for i in dataframes:\n if end_date is not \"Last\":\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:end_date])\n else:\n setattr(new_stock_data, i, getattr(self, i).ix[start_date:])\n\n for i in dictionaries:\n new_dict = {}\n for d in dates:\n new_dict[d] = getattr(self, i)[d]\n setattr(new_stock_data, i, new_dict)\n\n new_stock_data.dates = dates\n new_stock_data.str_dates = [str(d)[:USEFUL_TIMESTAMP_CHARS] for d in dates]\n\n return 
new_stock_data", "def valuation(self, from_date=None):\n import pandas_datareader.data as pdr\n import datetime\n to_date = datetime.date.today()\n if not from_date: from_date = to_date - datetime.timedelta(days=1)\n px = pdr.DataReader(self.ticker, 'yahoo', from_date, to_date)\n\n f = self.Fundamentals\n\n print(\"OF COURSE \", 7, f, px)\n # for i in set(f.perod_end_date):", "def resampleDataSet(dailyData, resampleString, resampleMethod, customFunction = None):\n\n # Make sure the index is sorted\n dailyData.sort_index(level='Datetime', inplace=True)\n\n # Get today's date\n today = datetime.now()\n\n # Create a new empty series\n resampleData = pd.Series([], index = pd.DatetimeIndex([]))\n\n # Get information about the daily data\n firstDate = dailyData.index[0][0]\n\n # Parse the resample string\n resampleList = resampleString.split('/') # Converts 'R/1978-10-01/P1M/F1Y' into ['R', '1978-10-01', 'P1M', 'F1Y', 'S1Y']\n\n # Validate the list\n if resampleList[0] != 'R' or len(resampleList[1]) != 10 or resampleList[2][0] != 'P' or resampleList[3][0] != 'F': #or len(resampleList) != 4\n return resampleData, 1, 'Invalid Resample String. Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y'\n \n # Validate the resample method\n if resampleMethod not in ['accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median']:\n return resampleData, 1, \"Invalid resampling method. Provide one of 'accumulation', 'accumulation_cfs_kaf', 'average', 'first', 'last', 'max', 'min', 'custom', 'median'\"\n\n # Parse into values\n startDate = datetime.strptime(resampleList[1], '%Y-%m-%d') # >>> datetime.date(1978, 10, 1)\n period = isodate.parse_duration(resampleList[2]) # >>> isodate.duration.Duration(0, 0, 0, years=0, months=1)\n # Change the period to 1 day if the resample method is 'first'\n if resampleMethod == 'first':\n period = isodate.parse_duration(\"P1D\")\n frequency = isodate.parse_duration(resampleList[3].replace('F', 'P')) # >>> isodate.duration.Duration(0, 0, 0, years=1, months=1)\n\n # Create all the periods\n periods = []\n tracker = startDate\n while tracker <= today: # >>> periods = [(datetime.datetime(1978-10-01), datetime.datetime(1978-11-01))]\n periods.append((tracker, tracker+period))\n tracker += frequency\n\n # Parse the function\n func = lambda x: np.nan if x.isnull().all() else (np.nanmean(x) if resampleMethod == 'average' else (\n np.nansum(x) if resampleMethod == 'accumulation' else (\n 86400*(1/43560000)*np.nansum(x) if resampleMethod == 'accumulation_cfs_kaf' else (\n x.iloc[0] if resampleMethod == 'first' else (\n x.iloc[-1] if resampleMethod == 'last' else (\n np.nanmedian(x) if resampleMethod == 'median' else (\n np.nanmax(x) if resampleMethod == 'max' else (\n np.nanmin(x) if resampleMethod == 'min' else eval(customFunction)))))))))\n\n # Resample the data\n for idx in pd.IntervalIndex.from_tuples(periods):\n data = dailyData.loc[idx.left : idx.right]\n if resampleMethod != 'first' and resampleMethod != 'last':\n data.isMostlyThere = len(data) > int(0.95*(idx.right-idx.left).days) # Check to make sure 95% of data is there!\n else:\n data.isMostlyThere = True\n resampleData.loc[idx.left] = ( func(data) if (idx.right >= firstDate and today >= idx.right and (data.isMostlyThere)) else np.nan )\n\n if len(resampleList) == 5:\n shiftStrings = list(resampleList[4])\n if shiftStrings[1].isdigit():\n resampleData.index = resampleData.index + pd.offsets.DateOffset(years=int(shiftStrings[1]))\n else:\n return resampleData, 
1, \"Invalid Resample String. Format should be similar to R/1978-10-01/P1M/F1Y or R/1978-10-01/P1M/F1Y/S1Y\"\n\n\n # Name the dataframe\n resampleData.name = dailyData.name + '_' + resampleList[1] + '_' + resampleList[2] + '_' + resampleList[3] + '_' + resampleMethod + '_' + str(customFunction)\n\n return resampleData", "def <start>/<end>(<start>/<end>)\ndef calc_temps(start_date, end_date):", "def get_values_between_dates(self, date_start=None, date_end=None, dt_max=0.0, start_strict=False, end_strict=True):\n \n if start_strict:\n start_diff_operator = '>'\n else:\n start_diff_operator = '>='\n if end_strict:\n end_diff_operator = '<'\n else:\n end_diff_operator = '<='\n \n if dt_max < 0.:\n raise Exception('dt_max must be > 0')\n \n if (date_start is not None) and (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) AND datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%(start_diff_operator, end_diff_operator), \\\n params=[self.date2str(date_start-timedelta(dt_max)), self.date2str(date_end+timedelta(dt_max))])\n elif (date_start is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%start_diff_operator, \\\n params=[self.date2str(date_start-timedelta(dt_max))])\n elif (date_end is not None):\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO WHERE datetime(date_data) %s datetime(?) ORDER BY datetime(date_data)\"%end_diff_operator, \\\n params=[self.date2str(date_end+timedelta(dt_max))])\n else:\n return self.read_as_pandas_dataframe(\"SELECT * FROM FILEINFO ORDER BY datetime(date_data)\")", "def __getQuerysetGivenInterval(model, start_date, end_date):\n cur_model = {\n 'donor': Donor,\n 'donation': Donation,\n 'item': Item\n }.get(model, Donor.objects.none())\n\n # might need following lines when changing back to created_at:\n # date_format = \"%Y-%m-%d\"\n # if start_date is not None:\n # timezone_unaware_start_date = datetime.strptime(start_date, date_format)\n # timezone_aware_start_date = pytz.utc.localize(timezone_unaware_start_date)\n #\n # if end_date is not None:\n # timezone_unaware_end_date = datetime.strptime(end_date, date_format)\n # timezone_aware_end_date = pytz.utc.localize(timezone_unaware_end_date).date()\n\n if start_date is not None and end_date is not None:\n return cur_model.objects.filter(documented_at__range=(start_date, end_date))\n elif start_date is not None and end_date is None:\n return cur_model.objects.filter(documented_at__gte=start_date)\n elif start_date is None and end_date is not None:\n return cur_model.objects.filter(documented_at__lte=end_date)\n else:\n return cur_model.objects.all()", "def IRIS_ARC_IC(input, clients):\n \n if input[clients + '_ic_auto'] == 'Y':\n global events \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n address = eventpath\n elif input[clients + '_ic'] != 'N':\n address = input[clients + '_ic']\n \n events, address_events = quake_info(address, 'info')\n \n for i in range(0, len(events)):\n sta_ev = read_station_event(address_events[i])\n ls_saved_stas = []\n \n for j in range(0, len(sta_ev[0])):\n if clients == sta_ev[0][j][13]:\n station_id = sta_ev[0][j][0] + '.' + sta_ev[0][j][1] + '.' + \\\n sta_ev[0][j][2] + '.' 
+ sta_ev[0][j][3]\n ls_saved_stas.append(os.path.join(address_events[i], 'BH_RAW',\\\n station_id))\n \n print 'event: ' + str(i+1) + '/' + str(len(events)) + \\\n ' -- ' + clients\n print '------------------------------------'\n inst_correct(input, ls_saved_stas, address_events[i], clients) \n \n print \"**********************************\"\n print clients.upper() + ' Instrument Correction is DONE'\n print \"**********************************\"", "def compute(self, today, asset_ids, out, low):\n today_day = today.weekday()\n current_end_week_idx = today_day\n current_start_week_idx = 4 + today_day\n # current_week_high = high[current_end_week_idx:current_start_week_idx, :].max(axis=0)\n current_week_low = low[current_end_week_idx:current_start_week_idx, :].min(\n axis=0)\n out[:] = current_week_low", "def test_new_items_have_increasing_dates(self):\n input_ = [\n self.indicator_record(date=datetime.date(2004, 11, 1), value=0.69),\n self.indicator_record(date=datetime.date(2004, 12, 1), value=0.86),\n self.indicator_record(date=datetime.date(2005, 1, 1), value=0.58),\n ]\n records = self.expander._ipca_from_15_expander(input_)\n\n self.assertTrue(records[-1].date > input_[-1].date)", "def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)", "def _setup_account_general(insid, start_date, rate_dict, counterparty,\n prf_name, account_name, reinvest,\n funding_instype, external_id=None):\n calendar = acm.FCalendar['ZAR Johannesburg']\n next_bus_day = calendar.AdjustBankingDays(acm.Time.DateToday(), 1)\n day_after_start_date = calendar.AdjustBankingDays(start_date, 1)\n # Make sure that two conditions are met:\n # 1. End date doesn't lie in the past.\n # 2. 
Start date predates end date.\n end_date = max(next_bus_day, day_after_start_date)\n\n deposit = acm.FInstrument[insid]\n if deposit:\n LOGGER.info(\"The instrument {} already exists\".format(insid))\n if deposit.ExternalId1():\n LOGGER.info(\"Updating the external id from {} to {}\".format(\n deposit.ExternalId1(), external_id))\n deposit.ExternalId1(external_id)\n deposit.Commit()\n return None\n\n LOGGER.info('Creating %s...', insid)\n acm.BeginTransaction()\n try:\n # Instrument\n deposit = acm.FDeposit()\n deposit.Currency(CURRENCY)\n deposit.Name(insid)\n deposit.DayCountMethod(DAY_COUNT_METHOD)\n deposit.SpotBankingDaysOffset(0)\n # this sets the exp_time, which has a higher priority over exp_day,\n # which is set when calling re_rate(...) from ael. If the exp_time\n # is not set, acm (trading manager) uses the exp_day.\n # deposit.ExpiryDate(end_date)\n deposit.ContractSize(1)\n deposit.Quotation('Clean')\n deposit.QuoteType('Clean')\n deposit.OpenEnd('Open End')\n deposit.MinimumPiece(MINIMUM_PIECE)\n deposit.PayOffsetMethod('Business Days')\n if external_id:\n deposit.ExternalId1(external_id)\n\n # Leg\n leg = deposit.CreateLeg(1)\n leg.LegType('Call Fixed Adjustable')\n leg.Decimals(11)\n leg.StartDate(start_date)\n leg.EndDate(end_date)\n leg.EndPeriodUnit('Days')\n leg.DayCountMethod(DAY_COUNT_METHOD)\n if rate_dict['type'] == 'fixed':\n leg.FixedRate(rate_dict['rate'])\n leg.ResetDayOffset(0)\n leg.ResetType('Weighted')\n leg.ResetPeriod('1d')\n leg.ResetDayMethod('Following')\n leg.Currency(CURRENCY)\n leg.NominalFactor(1)\n leg.Rounding('Normal')\n leg.RollingPeriod('1m')\n leg.RollingPeriodBase(acm.Time.FirstDayOfMonth(acm.Time.DateAddDelta(\n start_date, 0, 1, 0)))\n leg.PayDayMethod('Following')\n leg.PayCalendar(calendar)\n leg.FixedCoupon(True)\n leg.NominalAtEnd(True)\n leg.FloatRateFactor(1)\n leg.FixedCoupon(True)\n leg.StartPeriod('-1d')\n leg.Reinvest(reinvest)\n if rate_dict['type'] == 'float':\n deposit.AddInfoValue('CallFloatRef', rate_dict['ref'])\n deposit.AddInfoValue('CallFloatSpread', rate_dict['spread'])\n deposit.Commit() # Commits both the instrument and the leg.\n\n # Trade\n trade = acm.FTrade()\n trade.Instrument(deposit)\n trade.Counterparty(counterparty)\n trade.Acquirer('PRIME SERVICES DESK')\n trade.AcquireDay(start_date)\n trade.ValueDay(start_date)\n trade.Quantity(1)\n trade.TradeTime(start_date)\n trade.Currency(CURRENCY)\n trade.Price(0)\n trade.Portfolio(acm.FPhysicalPortfolio[prf_name])\n trade.Type('Normal')\n trade.TradeTime(start_date)\n trade.Status('Simulated') # To allow for delete in case of rollback.\n trade.AddInfoValue('Funding Instype', funding_instype)\n trade.AddInfoValue('Call_Region', 'BB SANDTON')\n trade.AddInfoValue('Account_Name', account_name)\n trade.Commit()\n \n acm.CommitTransaction()\n except Exception as e:\n acm.AbortTransaction()\n LOGGER.exception(\"Could not create call/loan account {}\".format(insid))\n raise e\n\n deposit = acm.FInstrument[insid]\n if deposit:\n trades = deposit.Trades()\n if trades:\n LOGGER.info('The following trade has been created:{}\\n'.format(trades[0].Oid()))\n else:\n raise RuntimeError('Could not create trade!')\n else:\n raise RuntimeError('Could not create deposit!')", "def __init__(__self__, *,\n end_date: str,\n start_date: str,\n time: str):\n pulumi.set(__self__, \"end_date\", end_date)\n pulumi.set(__self__, \"start_date\", start_date)\n pulumi.set(__self__, \"time\", time)", "def billing_choose_dates(self):\n number_of_dates_to_be_generated_per_patient = (\n 
self.number_of_dates_to_be_generated_per_patient\n )\n dunning_cycle_length = self.dunning_cycle_length\n dates = self.dates\n first_date = random.choice(\n dates\n ) # randomly choose a start date from the list of possible start dates\n last_possible_date = first_date + datetime.timedelta(\n days=dunning_cycle_length\n ) # calculate the last date possible based on the Dunning Cycle\n time_between_dates = last_possible_date - first_date\n subsequent_events = random.sample(\n list(np.arange(0, time_between_dates.days)),\n number_of_dates_to_be_generated_per_patient,\n )\n subsequent_events.sort()\n dates = [\n first_date + datetime.timedelta(days=np.int(subsequent_event))\n for subsequent_event in subsequent_events\n ]\n event_list = pd.DataFrame(dates)\n return event_list", "def select_data(data=pd.DataFrame(), date_initial=\"2005-01-01\", date_final=\"2019-12-31\"):\n data = data[data.index >= date_initial]\n data = data[data.index <= date_final]\n return data", "def filter_data_by_date(df, ticker, start_date, end_date):\n if start_date is None:\n start_date = MIN_DATE\n\n if end_date is None:\n end_date = MAX_DATE\n\n filtered = df[\n (df[\"ticker\"] == ticker) & (df[\"date\"] >= start_date) & (df[\"date\"] <= end_date)\n ]\n return filtered", "def __init__(self, start_date_str: str, end_date_str: str):\r\n start_date, end_date = create_date_from_string(start_date_str, end_date_str)\r\n if is_date_valid(start_date, end_date):\r\n self.days_range_array = create_days_range(start_date, end_date)\r\n self.months_range_array = create_months_range(self.days_range_array)\r\n else:\r\n raise Exception", "def test_time_series_intraday_date_integer(self, mock_request):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='integer')\n url = \"http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json\"\n path_file = self.get_file_from_url(\"mock_time_series\")\n with open(path_file) as f:\n mock_request.get(url, text=f.read())\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n assert type(data.index[0]) == int", "def getEPADailyData(dateint, dt_ind, month, epa_df, yr):\n\n try:\n start = dateint + dt_ind * 10000\n end = start + 10001\n dly_epa_df = epa_df[(epa_df.created >= start) & (epa_df.created < end)]\n dly_epa_df.reset_index(inplace=True, drop=True)\n\n new_df = pd.DataFrame(columns=['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'epa_pm25_value', 'raw_concentration', 'aqi', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code', 'created'])\n for sitenm in dly_epa_df.site_name.unique():\n indx_ct = 0\n site_df = dly_epa_df[dly_epa_df.site_name == sitenm]\n for i in site_df.created.unique():\n indx_ct += 1\n new_df = pd.concat([new_df,site_df.iloc[indx_ct - 1:indx_ct]],ignore_index=True)\n\n if i != site_df.created.max(): # Don't interpolate the last record\n tmp_df = site_df.iloc[indx_ct - 1:indx_ct][['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code']]\n for j in range(1,6):\n new_dt = i + j * 10\n tmp_df['created'] = int(new_dt)\n tmp_df['epa_pm25_value'] = np.nan\n tmp_df['raw_concentration'] = np.nan\n tmp_df['aqi'] = np.nan\n new_df = pd.concat([new_df,tmp_df],ignore_index=True)\n\n # Convert aqi to numeric so that it gets interpolated\n new_df[['aqi']] = new_df[['aqi']].replace(\"nan\", np.nan, regex=True)\n new_df[['aqi']] = 
new_df[['aqi']].apply(pd.to_numeric)\n\n new_df = new_df.interpolate(method='linear', limit_direction='forward', axis=0)\n\n int_epa_df = new_df[(new_df.created >= start) & (new_df.created < (end - 1))]\n int_epa_df.reset_index(inplace=True, drop=True)\n \n # Write to S3\n s3 = s3fs.S3FileSystem()\n myopen = s3.open\n write('midscapstone-whos-polluting-my-air/EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr), int_epa_df, compression='GZIP', open_with=myopen)\n s3_resource = boto3.resource('s3')\n s3_resource.Object('midscapstone-whos-polluting-my-air', 'EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr)).Acl().put(ACL='public-read')\n\n except Exception as e:\n print(\"*** EXCEPTION IN GET EPA DAILY DATA *** {}\".format(e))\n return int_epa_df", "def setup_base_settings(\n self,\n number_of_dates_to_be_generated_per_patient,\n dunning_cycle_length,\n possible_invoice_dates,\n customer_ids,\n dob_range,\n cpt_codes,\n distributions,\n ):\n self.number_of_dates_to_be_generated_per_patient = (\n number_of_dates_to_be_generated_per_patient\n )\n self.dunning_cycle_length = dunning_cycle_length\n self.dates = possible_invoice_dates\n self.customerIds = customer_ids\n self.consecutive = check_consecutive(self.customerIds)\n if self.consecutive == False:\n print(\"Error setting up the object- customerIds aren't consecutive\")\n print(self.customerIds)\n self.dobs = pd.date_range(\n start=dob_range[\"start_dob\"],\n end=dob_range[\"end_dob\"],\n periods=len(self.customerIds),\n ).date # range of valid dates of birth for the patients\n self.CPTCodes = cpt_codes # CPT codes to choose from\n self.invoices = [\n 10000\n ] # first invoice id- other invoices are monotonically increasing i.e. generated by adding one to the previous invoice.\n\n # dictionary used to define the assumptions used in generating the data set\n self.distributions = distributions\n return True", "def working_data(df, date_of_interest, lower_window, upper_window):\n\n # Actual dates we are interested in\n lower_date = date_of_interest - timedelta(days=lower_window)\n upper_date = date_of_interest + timedelta(days=upper_window)\n\n # Specs want us to call more than that\n lower_date_extreme = date_of_interest - timedelta(days=(2 * lower_window + 1))\n upper_date_extreme = date_of_interest + timedelta(days=(2 * upper_window))\n\n # Tighten to the range we want (and show non-trading days too)\n df = df.reindex(pd.date_range(lower_date_extreme, upper_date_extreme, freq='D'))\n df = df.rename(columns={'volume': 'Volume'})\n df['Volume'] = df['Volume'].fillna(0)\n df['close'] = df['close'].fillna(method='ffill')\n\n # Tag with relative dates\n df = df.apply(tag_relative_date, axis=1, args=(date_of_interest, lower_date, upper_date))\n\n # Calculate the data we want\n df['Return'] = df['close'].diff()\n df['Return_pct'] = df['close'].pct_change()\n df['Daily_Spread'] = df['high'] - df['low']\n df['Daily_Spread'] = df['Daily_Spread'].fillna(0)\n\n return df", "def date_range(self):\n start_date = input(\"Enter a start date in the format DD/MM/YYYY> \")\n end_date = input(\"Enter an end date in the format DD/MM/YYYY> \")\n return start_date, end_date", "def test_output_day(self):\n input_ = [\n self.indicator_record(date=datetime.date(2011, 1, 1), value=0.83),\n self.indicator_record(date=datetime.date(2011, 2, 1), value=0.80),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n\n self.assertEqual(output[-1].date.day, 1)", "def test_aggr_date_input(self):\n\n actual_start_date = set([])\n actual_end_date 
= set([])\n for year in self.years:\n for my_date in self.dates:\n input_date = date(year, my_date[0], my_date[1])\n retail_date = RetailDate(input_date)\n actual_start_date.add(retail_date.year_start_date)\n actual_end_date.add(retail_date.year_end_date)\n\n # Verify the retail start dates\n expected_start = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_start_dates])\n diff = expected_start.symmetric_difference(actual_start_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))\n\n # Verify the retail end dates\n expected_end = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_end_dates])\n diff = expected_end.symmetric_difference(actual_end_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))", "def test_rise_timeseries_with_expert_model_for_correct_max_and_min():\n hot_day_index = 6\n cold_day_index = 12\n temperature_timeseries = average_temperature_timeseries_with_1_cold_and_1_hot_day(cold_day_index, hot_day_index)\n\n summer_explanation, winter_explanation = dianna.explain_timeseries(run_expert_model,\n timeseries_data=temperature_timeseries,\n method='rise',\n labels=[0, 1],\n p_keep=0.1, n_masks=10000,\n mask_type=input_train_mean)\n\n assert np.argmax(summer_explanation) == hot_day_index\n assert np.argmin(summer_explanation) == cold_day_index\n assert np.argmax(winter_explanation) == cold_day_index\n assert np.argmin(winter_explanation) == hot_day_index", "def visitPeriod(self, date):\n raise NotImplementedError()", "def test_first(self):\n start_date, end_date = get_quarterspan(datetime.date(1980, 1, 1))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 1, 1))\n self.assertEqual(end_date, datetime.date(1980, 3, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 2, 29))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 1, 1))\n self.assertEqual(end_date, datetime.date(1980, 3, 31))\n\n start_date, end_date = get_quarterspan(datetime.date(1980, 3, 31))\n self.assertTrue(start_date < end_date)\n self.assertEqual(start_date, datetime.date(1980, 1, 1))\n self.assertEqual(end_date, datetime.date(1980, 3, 31))", "def _rate_dates(self, common_object):\n if common_object.IsKindOf(acm.FCashFlow):\n start_date = common_object.StartDate()\n elif common_object.IsKindOf(acm.FReset):\n start_date = common_object.Day()\n else:\n message = \"Rate dates for {0} object are not defined\".format(\n type(common_object))\n raise ProvisionHandlerError(message)\n\n end_date = acm.Time().DateAddDelta(start_date, 0, 3, 0)\n end_date = self._adjust_to_banking_day(end_date)\n\n return (start_date, end_date)", "def test_date_range():\n year = 2012\n cres_m = get_curtailment(year, curt_fn='curtailment.json')[0]\n cres_dr = get_curtailment(year, curt_fn='curtailment_date_range.json')[0]\n for df_res, site in cres_m:\n gid = int(site.name)\n assert np.allclose(df_res['windspeed'], cres_dr[gid]['windspeed'])", "def slice(self, start_date, end_date = None):\n\n if end_date is None:\n end_date = self.series.index[-1]\n self.series = self.series.loc[start_date:end_date]", "def RisetimeFinder(X, Y,startIndex,peakIndex,baseline):\n # Channel1Data is from first TOF\n # Channel2Data is from second TOF\n hitAmplitude = Y[peakIndex]\n UpperThreshold = baseline - (.7 * (baseline - hitAmplitude))\n LowerThreshold = baseline - (.3 * (baseline - hitAmplitude))\n riseTimestart = 0\n riseTimeend = 0\n riseIndex = 0\n fallIndex = 0\n diffs = Y[startIndex:peakIndex]-UpperThreshold\n 
value = np.min(abs(diffs))\n noiserms = np.std(Y[:50])*5\n YStart = Y[startIndex]\n YSign =np.sign(Y[startIndex])\n #print(value,diffs)\n #print(np.where(value == abs(diffs))[0][0])\n riseIndex = int(np.where(value == abs(diffs))[0][0]) + startIndex\n diffs = Y[startIndex:peakIndex]-LowerThreshold\n value = np.min(abs(diffs))\n fallIndex = int(np.where(value == abs(diffs))[0][0]) + startIndex\n riseTimestart = Interpolator(X, Y, riseIndex-1,riseIndex+1,UpperThreshold)\n riseTimeend = Interpolator(X, Y, fallIndex-1,fallIndex+1,LowerThreshold)\n #print(UpperThreshold,LowerThreshold)\n result = dict()\n result['risetime'] = riseTimestart-riseTimeend\n result['starttime'] = riseTimeend\n if riseTimestart < X[startIndex] or riseTimestart > X[EndIndex] or riseTimeend < X[startIndex] or riseTimeend > X[EndIndex]:\n result['risetime']= False\n if riseTimestart - riseTimeend > (X[EndIndex] - X[startIndex]):\n result['risetime']= False\n if riseTimestart - riseTimeend <= 0:\n result['risetime']= False\n if riseIndex == 0 or fallIndex ==0:\n result['risetime']= False\n if YSign > 0:\n if(YStart > baseline + noiserms):\n result['risetime']= False\n if YSign < 0:\n if(YStart < baseline - noiserms):\n result['risetime']= False\n if len(np.unique(np.sign(np.diff(Y[fallIndex:startIndex])))) > 1:\n result['risetime']= False\n\n return result", "def visitRange(self, date):\n raise NotImplementedError()", "def test_change_of_year(self):\n\n input_ = [\n self.indicator_record(date=datetime.date(2006, 11, 1), value=0.31),\n self.indicator_record(date=datetime.date(2006, 12, 1), value=0.48),\n ]\n output = self.expander._ipca_from_15_expander(input_)\n expected = self.indicator_record(date=datetime.date(2007, 1, 1), value=0.35)\n actual = output[-1]\n\n self.assertEqual(expected, actual)", "def _select_ds(self, first_date: np.datetime64,\n last_date: np.datetime64) -> xr.Dataset:\n first_ts = self._floor_to_dt(first_date)\n last_ts = self._floor_to_dt(last_date) + self._dt\n first_month = first_ts.astype(\"M8[M]\")\n last_month = last_ts.astype(\"M8[M]\")\n ts = self._ts[\"date\"]\n if first_month < ts[0] or last_month > ts[-1]:\n upper_limit = (ts[-1] + np.timedelta64(1, 'M')).astype(\"M8[s]\")\n raise IndexError(\n f\"period [{first_date}, {last_date}] is out of range: \"\n f\"[{ts[0].astype('M8[s]')}, {upper_limit}[\")\n mask = (ts >= first_month) & (ts <= last_month)\n\n paths = self._ts[\"path\"][mask]\n ds = xr.open_dataset(paths[0]).isel(ocean_time=slice(0, -1, None))\n z0 = ds.sel(ocean_time=slice(first_ts, last_ts))\n\n if len(paths) > 1:\n ds = xr.open_dataset(paths[1]).isel(ocean_time=slice(0, -1, None))\n z1 = ds.sel(ocean_time=slice(first_ts, last_ts))\n return xr.concat([z0, z1], dim=\"ocean_time\")\n return z0", "def date_range_filter(dr):\n assert IDateRange.providedBy(dr) or IDateRangeFactory.providedBy(dr)\n if IDateRangeFactory.providedBy(dr):\n dr = dr(datetime.now())\n factory = queryUtility(IFactory, dottedname(IQueryFilter))\n if factory is None:\n return ComponentLookupError('cannot find factory for query filter')\n return factory(value=(dr.start, dr.end), query_range=dr.query_range)", "def get_start_and_end_dates(new_start_date=None):\n curr_date = datetime.utcnow()\n curr_date = pd.to_datetime(date(curr_date.year, curr_date.month, curr_date.day))\n if not(new_start_date):\n end_date = curr_date\n start_date = get_start_date(end_date, ANALYSIS_PERIOD)\n else:\n start_date = new_start_date\n end_date = curr_date\n \n start_date = start_date.replace(tzinfo=timezone.utc)\n end_date = 
end_date.replace(tzinfo=timezone.utc)\n return start_date, end_date", "def loop_observations ( self, start_date, end_date, step=1, fmt=\"%Y-%m-%d\" ):\n\n start_date = datetime.datetime.strptime( start_date, fmt )\n end_date = datetime.datetime.strptime( end_date, fmt )\n if start_date < self.date[0]:\n print \"No observations until %s, starting from there\" % self.date[0]\n start_date = self.date[0]\n\n if end_date > self.date[-1]:\n print \"No observations after %s, stopping there\" % self.date[-1]\n end_date = self.date[-1]\n\n delta = datetime.timedelta ( days=step )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n obs_dates = [ x.date() for x in self.date ]\n while this_date < end_date:\n if this_date in obs_dates:\n iloc = obs_dates.index ( this_date )\n have_obs = True\n the_data = self._data_pntr[iloc].ReadAsArray()\n try:\n the_mask = map ( *self.masks[iloc] )\n except:\n the_mask = self.get_mask ( iloc )\n the_emulator = self.emulator[ iloc ]\n the_sza = self.sza[ iloc ]\n the_saa = self.saa[ iloc ]\n the_vza = self.vza[ iloc ]\n the_vaa = self.vaa[ iloc ]\n the_fname = self._data_pntr[iloc].GetDescription()\n try:\n the_sensor = self.sensor[iloc]\n except:\n the_sensor = self.sensor\n try:\n the_spectrum = self.spectral[iloc]\n except:\n the_spectrum = self.spectral\n\n else:\n have_obs = False\n the_data = None\n the_mask = None\n the_emulator = None\n the_sza = None\n the_saa = None\n the_vza = None\n the_vaa = None\n the_fname = None\n the_spectrum = None\n the_sensor = None\n this_date += delta\n retval = namedtuple ( \"retval\", [\"have_obs\", \"sensor\", \"date\", \"image\", \"mask\", \"emulator\",\n \"sza\", \"saa\", \"vza\", \"vaa\", \"fname\", \"spectrum\"] )\n retvals = retval ( have_obs=have_obs, sensor=the_sensor, \n date=this_date - delta, image=the_data, mask=the_mask, emulator=the_emulator, sza=the_sza,\n saa=the_saa, vza=the_vza, vaa=the_vaa, fname=the_fname, spectrum=the_spectrum )\n yield retvals", "def test_fill_data_with_one_date(self):\n # date = pd.to_datetime('2015-06-30')\n date = pd.to_datetime('2011-05-09')\n print 'testing date: %s' % date.strftime('%Y-%m-%d')\n self.full_iv.get_data()\n\n # df_date = self.full_iv.df_all.query('date == %r' % date)\n # df_date = df_date[['date', 'dte', 'mark', 'strike', 'impl_vol']]\n # print df_date.sort_values(['dte', 'strike']).to_string(line_width=1000)\n\n self.full_iv.df_stock = self.full_iv.df_stock[date:date]\n df_iv = self.full_iv.calc_iv()\n\n print df_iv\n\n self.assertTrue(len(df_iv))", "def test_range():\n begin_date = datetime.datetime(2000, 1, 1)\n end_date = datetime.datetime.today()\n\n if os.path.isfile(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\"):\n dates_available = pickle.load(open(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\", \"r\"))\n else:\n prices_available = yahoo.webload_symbol_price(\"SPY\", begin_date, end_date)\n dates_available = set(timestamp.to_pydatetime() for timestamp in prices_available.index.tolist())\n pickle.dump(dates_available, open(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\", \"w\"))\n\n dates_expected = set([day for day in itertools.takewhile(\n lambda d: d <= end_date,\n CALENDAR.every_nth_between(begin_date, end_date, 1)\n )])\n\n dates_misaligned = dates_available.symmetric_difference(dates_expected)\n\n assert len(dates_misaligned) == 0", "def _exclude_dates(self, X, y, exclude_dates):\n self.exclude_dates = exclude_dates\n if len(self.exclude_dates) != 0:\n for exclude_date_range in 
self.exclude_dates:\n t0,t1 = [datetimeify(dt) for dt in exclude_date_range]\n inds = (y.index<t0)|(y.index>=t1)\n X = X.loc[inds]\n y = y.loc[inds]\n return X,y", "def set_date_range(self, start_date, end_date):\n self._validate_date_range(start_date, end_date)\n self.start_date = pd.Timestamp(start_date)\n self.end_date = pd.Timestamp(end_date)", "def retrieve_observations_and_simsurvey(\n session,\n start_date,\n end_date,\n localization_id,\n instrument_id,\n survey_efficiency_analysis_id,\n survey_efficiency_analysis_type,\n):\n\n if survey_efficiency_analysis_type == \"SurveyEfficiencyForObservations\":\n survey_efficiency_analysis = session.scalars(\n sa.select(SurveyEfficiencyForObservations).where(\n SurveyEfficiencyForObservations.id == survey_efficiency_analysis_id\n )\n ).first()\n if survey_efficiency_analysis is None:\n raise ValueError(\n f'No SurveyEfficiencyForObservations with ID {survey_efficiency_analysis_id}'\n )\n elif survey_efficiency_analysis_type == \"SurveyEfficiencyForObservations\":\n survey_efficiency_analysis = session.scalars(\n sa.select(SurveyEfficiencyForObservationPlan).where(\n SurveyEfficiencyForObservationPlan.id == survey_efficiency_analysis_id\n )\n ).first()\n if survey_efficiency_analysis is None:\n raise ValueError(\n f'No SurveyEfficiencyForObservationPlan with ID {survey_efficiency_analysis_id}'\n )\n else:\n raise ValueError(\n 'survey_efficiency_analysis_type must be SurveyEfficiencyForObservations or SurveyEfficiencyForObservationPlan'\n )\n\n payload = survey_efficiency_analysis.payload\n\n instrument = session.scalars(\n sa.select(Instrument)\n .options(joinedload(Instrument.telescope))\n .where(Instrument.id == instrument_id)\n ).first()\n\n localization = session.scalars(\n sa.select(Localization).where(Localization.id == localization_id)\n ).first()\n\n data = get_observations(\n session,\n start_date,\n end_date,\n telescope_name=instrument.telescope.name,\n instrument_name=instrument.name,\n localization_dateobs=localization.dateobs,\n localization_name=localization.localization_name,\n localization_cumprob=payload[\"localization_cumprob\"],\n )\n\n observations = data[\"observations\"]\n\n if len(observations) == 0:\n raise ValueError('Need at least one observation to run SimSurvey')\n\n unique_filters = list({observation[\"filt\"] for observation in observations})\n\n if not set(unique_filters).issubset(set(instrument.sensitivity_data.keys())):\n raise ValueError('Need sensitivity_data for all filters present')\n\n for filt in unique_filters:\n if not {'exposure_time', 'limiting_magnitude', 'zeropoint'}.issubset(\n set(instrument.sensitivity_data[filt].keys())\n ):\n raise ValueError(\n f'Sensitivity_data dictionary missing keys for filter {filt}'\n )\n\n # get height and width\n stmt = (\n InstrumentField.select(session.user_or_token)\n .where(InstrumentField.id == observations[0][\"field\"][\"id\"])\n .options(undefer(InstrumentField.contour_summary))\n )\n field = session.scalars(stmt).first()\n if field is None:\n raise ValueError(\n 'Missing field {obs_dict[\"field\"][\"id\"]} required to estimate field size'\n )\n contour_summary = field.to_dict()[\"contour_summary\"][\"features\"][0]\n coordinates = np.squeeze(np.array(contour_summary[\"geometry\"][\"coordinates\"]))\n coords = SkyCoord(\n coordinates[:, 0] * u.deg, coordinates[:, 1] * u.deg, frame='icrs'\n )\n width, height = None, None\n for c1 in coords:\n for c2 in coords:\n dra, ddec = c1.spherical_offsets_to(c2)\n dra = dra.to(u.deg)\n ddec = ddec.to(u.deg)\n if width 
is None and height is None:\n width = dra\n height = ddec\n else:\n if dra > width:\n width = dra\n if ddec > height:\n height = ddec\n\n observation_simsurvey(\n observations,\n localization.id,\n instrument.id,\n survey_efficiency_analysis_id,\n survey_efficiency_analysis_type,\n width=width.value,\n height=height.value,\n number_of_injections=payload['number_of_injections'],\n number_of_detections=payload['number_of_detections'],\n detection_threshold=payload['detection_threshold'],\n minimum_phase=payload['minimum_phase'],\n maximum_phase=payload['maximum_phase'],\n model_name=payload['model_name'],\n optional_injection_parameters=payload['optional_injection_parameters'],\n )", "def test_time_series_intraday_date_indexing(self, mock_request):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='date')\n url = \"http://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json\"\n path_file = self.get_file_from_url(\"mock_time_series\")\n with open(path_file) as f:\n mock_request.get(url, text=f.read())\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n if sys.version_info[0] == 3:\n assert isinstance(data.index[0], str)\n else:\n assert isinstance(data.index[0], basestring)", "def zonedetails_nearest_range(zone_code, start_datetime=None):\n if start_datetime:\n timenow = start_datetime.time()\n day = start_datetime.weekday()\n else:\n # datetime.time(15, 29, 22, 129537)\n timenow = datetime.now().time()\n # Return the day of the week as an integer, where Monday is 0 and Sunday is 6\n day = datetime.today().weekday()\n\n qset_zonedetails_today = zone_details_for_day(zone_code,day)\n\n if not qset_zonedetails_today:\n raise Exception(\"No hay detalles cargado para hoy, revisar\")\n\n # Traverse elements to guess in which we are.If we are at the end of a day\n # we obtain the first interval of the next day.\n # For example, a \"graph\" of what would be the range of what is it charged\n # when parking in the day\n # 00hs 8hs 13hs 14hs 20hs 23.59hs\n # ----------|||||||||||-------|||||||||||||||-----------\n zonedetails = None\n for item in qset_zonedetails_today:\n end = item.hour_end\n # See if we are before or after or inside range of current elem.\n\n # As they are ordered by time, we check if we are before the end of the period\n # as if we are before it starts, we will pick that as wll.\n # (and as they are ordered, if one element has passed, it means that it is\n # after the one that has not alreay passed the condition.\n if timenow < end:\n zonedetails = item\n break;\n\n # This means that now, is after the last interval where the user hasto pay and the end of today\n # So we pick the first of next day\n if zonedetails is None:\n day += 1\n qset_zonedetails_tomorrow = zone_details_for_day(zone_code,day)\n zonedetails = qset_zonedetails_tomorrow.first()\n\n return zonedetails", "def get_time_series(for_date, instr, spec):\n return acm.FTimeSeries.Select01(\"day = '%s' and recaddr = %i \"\n \"and timeSeriesSpec = %i and runNo = 1\"\n % (for_date, instr.Oid(), spec.Oid()), '')", "def daily_insolation_limits(irrad, clearsky, daily_min=0.4, daily_max=1.25):\n daily_irradiance = _daily_total(irrad)\n daily_clearsky = _daily_total(clearsky)\n good_days = quality.util.check_limits(\n daily_irradiance/daily_clearsky,\n upper_bound=daily_max,\n lower_bound=daily_min\n )\n return good_days.reindex(irrad.index, method='pad', fill_value=False)", "def __init__(\n 
self,\n name: str,\n source: str,\n start_date: np.ndarray,\n end_date: np.ndarray,\n ):\n super().__init__(name, source, start_date)\n self.end_date = end_date", "def simulator_from_instrument(instrument):\r\n\r\n grid = grid_from_instrument(instrument=instrument)\r\n psf = psf_from_instrument(instrument=instrument)\r\n\r\n if instrument in \"vro\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=100.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"euclid\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2260.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"hst_up\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=2000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n elif instrument in \"ao\":\r\n return ag.SimulatorImaging(\r\n exposure_time_map=ag.Array2D.full(\r\n fill_value=1000.0, shape_native=grid.shape_native\r\n ),\r\n psf=psf,\r\n background_sky_map=ag.Array2D.full(\r\n fill_value=1.0, shape_native=grid.shape_native\r\n ),\r\n add_poisson_noise=True,\r\n )\r\n else:\r\n raise ValueError(\"An invalid instrument was entered - \", instrument)", "def test_initial_records_are_preserved(self):\n input_ = [\n self.indicator_record(date=datetime.date(1998, 5, 1), value=0.50),\n self.indicator_record(date=datetime.date(1998, 6, 1), value=0.02),\n self.indicator_record(date=datetime.date(1998, 7, 1), value=-0.12),\n ]\n records = self.expander._ipca_from_15_expander(input_)\n\n same_date_values = [record.date == records[index_].date and\n record.value == records[index_].value\n for index_, record in enumerate(input_)]\n\n self.assertTrue(all(same_date_values))", "def new_infection_events_Garki(ind_df):\r\n first_positive = []\r\n lapsed_positive = []\r\n first_malaria_free_date = None # first time point is left-censored on malaria-free interval\r\n for ind, measurements in ind_df.iterrows():\r\n # initialize positive flag as False\r\n\r\n positive = False\r\n if measurements.asexual_density > 0:\r\n positive = True\r\n elif measurements.gametocyte_density > 0:\r\n positive = True\r\n if first_malaria_free_date is None:\r\n if positive:\r\n first_malaria_free_date = None\r\n else:\r\n first_malaria_free_date = measurements.datecoll\r\n else:\r\n interval = (measurements.datecoll - first_malaria_free_date) / np.timedelta64(1, 'D')\r\n if positive:\r\n time_since_last = (measurements.datecoll - ind_df.loc[ind-1,:].datecoll) / np.timedelta64(1, 'D')\r\n first_malaria_free_date = None\r\n\r\n if time_since_last < 120:\r\n\r\n first_positive.append({'date': measurements.datecoll, 'area': measurements.area, 'vname':measurements.vname,\r\n 'age': measurements.age, 
'age_at_enroll':measurements.age_at_enroll,'IRS_status':measurements.IRS_status,'id': measurements.id, 'fever': measurements.fever,\r\n 'asexual_density': (float(measurements.pfa)/float(measurements.exam))*200, 'gametocyte_density': (float(measurements.pfg)/float(measurements.exam))*200, 'examined': measurements.exam, 'interval': interval, 'time_since_last': time_since_last,'frequency': measurements.infection_frequency})\r\n else:\r\n lapsed_positive.append({'date': measurements.datecoll, 'area': measurements.area,'vname':measurements.vname,\r\n 'age': measurements.age, 'age_at_enroll':measurements.age_at_enroll,'IRS_status':measurements.IRS_status,'id': measurements.id, 'fever': measurements.fever,\r\n 'asexual_density': (float(measurements.pfa)/float(measurements.exam))*200, 'gametocyte_density': (float(measurements.pfg)/float(measurements.exam))*200, 'examined': measurements.exam, 'interval': interval,'time_since_last': time_since_last,'frequency': measurements.infection_frequency})\r\n if measurements.IRS_status == 'post_IRS':\r\n break\r\n else:\r\n pass\r\n\r\n return first_positive,lapsed_positive", "def get_dummy_data(num_days, low, high, end_date='1970-01-01'):\n step = (high - low) / (num_days - 1)\n ref = datetime.strptime(end_date, '%Y-%m-%d').date()\n start_dt = ref - timedelta(days=(num_days - 1))\n end_dt = ref + timedelta(days=1)\n ts = np.arange(start_dt, end_dt, timedelta(days=1)).astype(date)\n df = pd.DataFrame(data={'price': np.arange(low, high + 1, step)}, index=pd.DatetimeIndex(ts))\n df.index.name = 'date'\n return df", "def from_start_date_to_end_date(start, end):\n\n first_canonicalized = start.replace(\" \", \"\").lower()\n second_canonicalized = end.replace(\" \", \"\").lower()\n first_search_date = start.replace(\" \", \"\").lower()\n second_search_date = end.replace(\" \", \"\").lower() \n all_dates_between_start_date_and_end_date = [multiple_dates for multiple_dates in temperature_parameters_list if multiple_dates[\"date\"\n ] >= first_search_date and multiple_dates[\"date\"] <= second_search_date]\n \n if first_search_date == first_canonicalized and second_search_date == second_canonicalized:\n return jsonify(all_dates_between_start_date_and_end_date)\n\n return jsonify({\"error\": f\"{start} and {end} not found.\"}), 404", "def createIndoorTimeseries(ts, insulation):\r\n result = ts.copy()\r\n result.correct(-INSULATIONS[insulation])\r\n result.addLevel(BACKGROUND)\r\n return result", "def populate_agdds(start_date, end_date, source, source_id, stations):\r\n # possibly grab ACIS station data (for entire date range)\r\n if source == 'ACIS':\r\n station_ids = []\r\n for station in stations:\r\n station_ids.append(station['char_network_id'])\r\n acis_data = get_acis_climate_data(\",\".join(station_ids), 'mint,maxt,gdd32,gdd50', start_date, end_date)\r\n\r\n for station in stations:\r\n print(station['char_network_id'])\r\n # grab previous days tmin, tmax, and agdd for both bases from mysql agdds table and start over at year breaks\r\n day_before_start_date = start_date - timedelta(days=1)\r\n if day_before_start_date.year == start_date.year:\r\n prev_tmin = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmin')\r\n prev_tmax = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmax')\r\n agdd32 = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'agdd')\r\n agdd50 = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 
50, 'agdd')\r\n else:\r\n prev_tmin = None\r\n prev_tmax = None\r\n agdd32 = None\r\n agdd50 = None\r\n\r\n if prev_tmin is None or prev_tmin == 'M':\r\n prev_tmin = 0\r\n if prev_tmax is None or prev_tmax == 'M':\r\n prev_tmax = 0\r\n if agdd32 is None or agdd32 == 'M':\r\n agdd32 = 0\r\n if agdd50 is None or agdd50 == 'M':\r\n agdd50 = 0\r\n\r\n # possibly find station of interest from ACIS retrieved data\r\n acis_station = None\r\n if source == 'ACIS':\r\n station_found = False\r\n for a_station in acis_data['data']:\r\n if station_found:\r\n break\r\n for sid in a_station['meta']['sids']:\r\n # print(sid)\r\n # print(station['char_network_id'])\r\n if station['char_network_id'] in sid:\r\n station_found = True\r\n acis_station = a_station\r\n break\r\n if not station_found:\r\n print(\"Could not find station \" + station['char_network_id'])\r\n\r\n previous_year = start_date.year\r\n delta = end_date - start_date\r\n for i in range(delta.days + 1):\r\n day = start_date + timedelta(days=i)\r\n doy = day.timetuple().tm_yday\r\n\r\n # reset the agdd to 0 if we go into a new year\r\n if previous_year != day.year:\r\n agdd32 = 0\r\n agdd50 = 0\r\n previous_year = day.year\r\n\r\n missing_data = False\r\n print(day.strftime(\"%Y-%m-%d\"))\r\n\r\n # see if we already have tmin and tmax from local db\r\n # tmin = None\r\n # tmax = None\r\n tmin = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmin')\r\n tmax = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmax')\r\n\r\n already_retrieved = False\r\n if tmin is not None and tmin != 'M' and tmax is not None and tmax != 'M' and source != 'PRISM':\r\n already_retrieved = True\r\n\r\n # don't already have tmin and tmax locally so grab from URMA postgis db or ACIS data\r\n if not already_retrieved:\r\n if source == 'URMA':\r\n if station['char_value'] == 'AK':\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'alaska')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'alaska')\r\n else:\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'conus')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'conus')\r\n # URMA and PRISM are in celsius in our postgis db everything else is Fer so convert here\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n tmax = tmax * 1.8 + 32\r\n elif source == 'PRISM':\r\n tmin = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmin')\r\n tmax = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmax')\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n tmax = tmax * 1.8 + 32\r\n elif acis_station is not None:\r\n tmin = acis_station['data'][i][0]\r\n tmax = acis_station['data'][i][1]\r\n\r\n # if tmin or tmax is missing, set to previous day's and mark as missing\r\n if tmin is not None and tmin != 'M':\r\n tmin = float(tmin)\r\n prev_tmin = tmin\r\n else:\r\n missing_data = True\r\n tmin = prev_tmin\r\n if tmax is not None and tmax != 'M':\r\n tmax = float(tmax)\r\n prev_tmax = tmax\r\n else:\r\n missing_data = True\r\n tmax = prev_tmax\r\n\r\n # compute gdd and agdd for both bases\r\n gdd32 = compute_gdd(tmin, tmax, 32)\r\n gdd50 = compute_gdd(tmin, tmax, 50)\r\n\r\n agdd32 += gdd32\r\n agdd50 += gdd50\r\n\r\n if not already_retrieved:\r\n # do an insert or update\r\n add_agdd_row(station['station_id'], source_id, gdd32, 
agdd32, day.year, doy, day, 32, missing_data, tmin, tmax)\r\n add_agdd_row(station['station_id'], source_id, gdd50, agdd50, day.year, doy, day, 50, missing_data, tmin, tmax)", "def compute(self, today, asset_ids, out, high):\n today_day = today.weekday()\n current_end_week_idx = today_day\n current_start_week_idx = 4 + today_day\n current_week_high = high[current_end_week_idx:current_start_week_idx, :].max(\n axis=0)\n # current_week_low = low[current_end_week_idx:current_start_week_idx, :].min(axis=0)\n out[:] = current_week_high", "def __add__ ( self, other, resample_opts=None ):\n result = ObservationStorage (datadir=self.datadir, \\\n resample_opts=resample_opts )\n if self.date[0] > other.date[0]:\n start_date = other.date[0]\n else:\n start_date = self.date[0]\n if self.date[-1] > other.date[-1]:\n end_date = other.date[-1]\n else:\n end_date = self.date[-1]\n \n delta = datetime.timedelta ( days=1 )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n \n this_obs_dates = [ x.date() for x in self.date ]\n other_obs_dates = [ x.date() for x in other.date ]\n \n date = [] ; vza = [] ; vaa = [] ; sza = [] ; saa = []\n emulator = [] ; mask = [] ; data_pntr = [] ; spectral = []\n sensor = []\n \n while this_date < end_date:\n if this_date in this_obs_dates:\n iloc = this_obs_dates.index ( this_date )\n date.append ( self.date[iloc] )\n emulator.append ( self.emulator[iloc] )\n vza.append ( self.vza[iloc] )\n sza.append ( self.sza[iloc] )\n vaa.append ( self.vaa[iloc] )\n saa.append ( self.saa[iloc] )\n spectral.append ( self.spectral )\n mask.append ( ( self.get_mask, [iloc] ) )\n sensor.append ( self.sensor )\n \n data_pntr.append ( self._data_pntr[iloc] )\n if this_date in other_obs_dates:\n iloc = other_obs_dates.index ( this_date )\n date.append ( other.date[iloc] )\n emulator.append ( other.emulator[iloc] )\n vza.append ( other.vza[iloc] )\n sza.append ( other.sza[iloc] )\n vaa.append ( other.vaa[iloc] )\n saa.append ( other.saa[iloc] )\n spectral.append ( other.spectral )\n mask.append ( ( other.get_mask, [iloc] ) )\n sensor.append ( other.sensor )\n data_pntr.append ( other._data_pntr[iloc] )\n this_date += delta\n result.vza = vza\n result.vaa = vaa\n result.sza = sza \n result.saa = saa \n result.date = date\n result.spectral = spectral\n result.masks = mask\n result.sensor = sensor\n result.emulator = emulator\n result._data_pntr = data_pntr\n return result", "def get_school_year(data, date_col, data_path):\n\n # generate empty df with all dates between start and end\n start = data[date_col].min()\n end = data[date_col].max()\n df = pd.date_range(start, end, freq=\"D\").to_frame(index=False, name=\"date\")\n\n # read external holidays csv\n def _parser(date):\n return pd.to_datetime(date)\n\n holidays = pd.read_csv(f'{data_path}',\n parse_dates=['date_debut', 'date_fin'],\n date_parser=_parser)\n holidays = holidays[[\"annee_scolaire\", \"date_debut\", \"date_fin\"]]\n holidays = holidays.drop_duplicates()\n\n # simulate an interval based left join using pandas\n # perform a cross join on temp_key\n up_bound = \"date_fin\"\n low_bound = \"date_debut\"\n df['temp_key'] = 1\n holidays['temp_key'] = 1\n crossjoindf = pd.merge(df, holidays, on=['temp_key'])\n\n df.drop(columns=['temp_key'], inplace=True)\n crossjoindf.drop(columns=['temp_key'], inplace=True)\n \n # filter with lower_bound & upper_bound\n conditionnal_df = crossjoindf[\n (crossjoindf[\"date\"] >= crossjoindf[low_bound]) & (crossjoindf[\"date\"] <= crossjoindf[up_bound])]\n\n # merge on the main df 
with all cols as keys to simulate left join\n df_col = df.columns.values.tolist()\n conditionnal_df.set_index(df_col, inplace=True)\n df = df.merge(conditionnal_df, left_on=df_col, right_index=True, how='left')\n\n df.set_index('date', inplace=True) \n data = pd.merge(data, df['annee_scolaire'], on='date')\n \n return data", "def __init__(self, name, value, start_date, end_date, period, interest): \n SavingPlan.__init__(self, name, value, start_date, end_date, period)\n self.interest = interest", "def load_dataset(self, first_date: np.datetime64,\n last_date: np.datetime64) -> xr.Dataset:\n first_date = self._shift_date(first_date.astype(\"datetime64[ns]\"),\n -1, self.time_delta)\n last_date = self._shift_date(last_date.astype(\"datetime64[ns]\"), 1,\n self.time_delta)\n LOGGER.debug(\"Loading dataset from %s to %s\", first_date,\n last_date)\n\n if first_date < self.dataset.time[\n 0] or last_date > self.dataset.time[-1]:\n raise IndexError(\n f\"period [{first_date}, {last_date}] is out of range: \"\n f\"[{self.dataset.time[0]}, {self.dataset.time[-1]}]\")\n\n # Mask for selecting data covering the time period provided.\n mask = (self.dataset.time.data >=\n first_date) & (self.dataset.time.data <= last_date)\n return self.dataset.isel(time=np.argwhere(mask).squeeze())", "def get_scns_for_date(self, date_of_interest, valid=True, ard_prod=True, platform=None):\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n if platform is None:\n if valid and ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False, EDDSentinel1ASF.ARDProduct == True).all()\n elif valid:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False).all()\n elif ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.ARDProduct == True).all()\n else:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest).all()\n else:\n if valid and ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False, EDDSentinel1ASF.ARDProduct == True,\n EDDSentinel1ASF.Platform == platform).all()\n elif valid:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Invalid == False, EDDSentinel1ASF.Platform == platform).all()\n elif ard_prod:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.ARDProduct == True, EDDSentinel1ASF.Platform == platform).all()\n else:\n scns = ses.query(EDDSentinel1ASF).filter(\n sqlalchemy.cast(EDDSentinel1ASF.Acquisition_Date, sqlalchemy.Date) == date_of_interest,\n EDDSentinel1ASF.Platform == platform).all()\n return scns", "def mast_query(instrument, templates, start_date, end_date, aperture=None, detector=None, filter_name=None,\n pupil=None, grating=None, readpattern=None, lamp=None):\n\n # If a single template name is input as a string, put it in a list\n if isinstance(templates, str):\n 
templates = [templates]\n\n # Make sure instrument is correct case\n instrument = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()]\n\n # instrument_inventory does not allow list inputs to\n # the added_filters input (or at least if you do provide a list, then\n # it becomes a nested list when it sends the query to MAST. The\n # nested list is subsequently ignored by MAST.)\n # So query once for each flat template, and combine outputs into a\n # single list.\n query_results = []\n for template_name in templates:\n\n # Create dictionary of parameters to add\n parameters = {\"date_obs_mjd\": {\"min\": start_date, \"max\": end_date},\n \"exp_type\": template_name}\n\n if detector is not None:\n parameters[\"detector\"] = detector\n if aperture is not None:\n parameters[\"apername\"] = aperture\n if filter_name is not None:\n parameters[\"filter\"] = filter_name\n if pupil is not None:\n parameters[\"pupil\"] = pupil\n if grating is not None:\n parameters[\"grating\"] = grating\n if readpattern is not None:\n parameters[\"readpatt\"] = readpattern\n if lamp is not None:\n parameters[\"lamp\"] = lamp\n\n query = instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS,\n add_filters=parameters, return_data=True, caom=False)\n if len(query['data']) > 0:\n query_results.extend(query['data'])\n\n return query_results", "def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)", "def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)", "def preprocess_dates(args):\n if 'date' in args:\n if args.get('period') == 'range' and 'end_date' in args:\n args['date'] = '{},{}'.format(args['date'],\n args['end_date'])\n return args", "def temp_daterange(start_date,end_date):\r\n # Query\r\n mam_temp_dr_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\r\n \r\n # Convert results into a list of min, ave, max temps for date range with specific start_date and end_date\r\n mam_temp_start_end = list(np.ravel(mam_temp_dr_results))\r\n return jsonify(mam_temp_start_end)", "def test_time_series_intraday_date_integer_python2(self, mock_urlopen):\n ts = TimeSeries(key=TestAlphaVantage._API_KEY_TEST,\n output_format='pandas', indexing_type='integer')\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey=test\"\n path_file = self.get_file_from_url(url)\n with open(path_file) as f:\n mock_urlopen.return_value = f\n data, _ = ts.get_intraday(\n \"MSFT\", interval='1min', outputsize='full')\n assert type(data.index[0]) == int", "def start_end(start_date,end_date):\n\n session = Session(engine)\n\n # Query from database full temp results for dates range\n temp_results = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n filter(measurement.date >= start_date).\\\n 
filter(measurement.date <= end_date).all()\n \n session.close() \n \n return jsonify(temp_results)", "def fit_timeseries(xdates, ydata):\n\n pass", "def range_date():\n # Query all stations within a certain range\n data = [Measurement.date, func.max(Measurement.tobs), func.min(Measurement.tobs), func.avg(Measurement.tobs)]\n qry = session.query(*data).filter(Measurement.date.between('2014-01-17', '2017-01-01')).all()\n before_date = list(np.ravel(qry))\n\n return jsonify(before_date)", "def calculate_provision_start_end(trades, instrument, portfolio_swap,\n start_date, end_date, warehousing_type='Daily'):\n \n start_provision = GetProvision(instrument, portfolio_swap, start_date)\n LOGGER.debug(\"Start provision '%s': %s\", instrument.Name(), start_provision)\n \n end_provision = 0.0\n today = acm.Time.DateToday()\n\n if today == end_date and not hist_valuation():\n for trade in trades.AsList():\n funding_instrument = trade.Portfolio().AdditionalInfo().PS_FundingIns()\n if funding_instrument != portfolio_swap:\n continue # Trade doesn't belong to the processed portfolio swap.\n end_provision += calculate(trade)\n else:\n LOGGER.debug(\"Historical valuation. Using PSwap to retrieve provision: '%s'\", portfolio_swap.Name())\n end_provision = GetProvision(instrument, portfolio_swap, end_date)\n \n LOGGER.debug(\"End provision '%s': %s\", instrument.Name(), end_provision)\n \n provision = end_provision - start_provision\n return provision", "def date_start_end(mytrip_start_date,mytrip_end_date):\n mytrip_start_date = dt.date(2015, 8, 10)\n mytrip_end_date= dt.date(2015, 8,14)\n prev_year = dt.timedelta(days=365)\n start_dt_strftime=dt.datetime.strptime('2014-08-10',\"%Y-%m-%d\")\n end_dt_strftime=dt.datetime.strptime('2014-08-14',\"%Y-%m-%d\") \n date_start_end_results=session.query(func.min(measurements.tobs), func.avg(measurements.tobs),func.max(measurements.tobs)).\\\n filter(measurements.date >= mytrip_start_date).filter(measurements.date <= end_dt_strftime).all()\n return(date_start_end_results)", "def sinterp(date, lastextremedate, deltatonext, low, hub, rising=True, verbose=False):\n # offset from date to the last extreme date\n deltatolast = date-lastextremedate\n # the factor from the date to pi scale\n factor = deltatolast.seconds/float(deltatonext.seconds)\n # the mapping into the pi scale\n x = np.pi*factor\n # rising(True) or falling slope(False)\n if rising:\n phase = -1\n else:\n phase = 1\n # return the interpolation with sinus flank\n result = (np.sin(x+phase*(.5*np.pi))+1)*.5*hub+low\n if verbose:\n print 'interpolating'\n print ' date:', date\n print ' deltatolast:', deltatolast\n print ' deltatonext:', deltatonext \n print ' factor:', factor\n print ' x:', x\n print ' result:', result\n return result", "def start_end(start_date, end_date):\n if start_date <= '2016-08-23' or start_date > \"2018-01-01\":\n return jsonify({\"error\": f\"Time period {start_date} not found.\"}), 404\n \n if end_date <= '2016-08-23' or end_date > \"2018-01-01\":\n return jsonify({\"error\": f\"Time period {end_date} not found.\"}), 404\n \n if end_date < start_date:\n return jsonify({\"error\": f\"Time period {end_date} comes before {start_date}.\"}), 404\n \n session = Session(engine)\n\n \"\"\"Return the tobs data\"\"\"\n # Query all tobs data\n station = pd.DataFrame(session.query(Measurement.id, Measurement.station, Measurement.date, Measurement.prcp, Measurement.tobs).all(), columns=['id', 'station', 'date', 'prcp', 'tobs'])\n\n session.close()\n \n station_hist = station[station[\"date\"] 
>= start_date]\n station_hist = station[station[\"date\"] <= end_date]\n station_hist = station_hist[station_hist[\"station\"] == 'USC00519281']\n low_temp = station_hist[\"tobs\"].min()\n max_temp = station_hist[\"tobs\"].max()\n avg_temp = station_hist[\"tobs\"].mean()\n temp_dict = {\"Low Temp\": low_temp, \"Max Temp\": max_temp, \"Average Temp\": avg_temp}\n return jsonify(temp_dict)", "def filter_on_date(self, start, end, dataframe, datecol=\"datetime\"):\n return dataframe.loc[(dataframe[datecol] < end) & (dataframe[datecol] > start)]", "def date_search(data, start_date, end_date):\n # change dates for date search\n data['timestamp'] = pd.to_datetime(data['timestamp']).dt.date\n d1 = datetime.datetime.strptime(f'{start_date}', '%Y-%m-%d').date()\n d2 = datetime.datetime.strptime(f'{end_date}', '%Y-%m-%d').date()\n\n # constrict data by date search parameters\n less_data = data[(data['timestamp'] >= d1) & (data['timestamp'] <= d2)]\n\n return less_data", "def check_required_range(specific=None, begin=None, end=None):\n\n if not specific and not (begin and end):\n raise ValueError('You must pass some form of date filter')\n\n if specific and (begin and end):\n raise ValueError('Cannot pass both a range and specific dates')\n\n if (begin and not end) or (end and not begin):\n raise ValueError(\"Must pass both begin and end for date range\")", "def test_second_date_static_1(self):\n input_ = (datetime.date(1993, 1, 29), datetime.date(1993, 3, 1))\n expected = (datetime.date(1993, 1, 30), datetime.date(1993, 3, 1))\n actual = self.expander._get_next_days(*input_)\n\n self.assertEqual(expected, actual)", "def for_date(self, date):\n return self.get(start_date__lte=date, end_date__gte=date)", "def temp_range(start_date, end_date):\n \"\"\"for dates between the start and end date inclusive.\"\"\"\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n # Convert list of tuples into normal list\n startend = list(np.ravel(results))\n\n return jsonify(startend)", "def select_date_interval_menu():\n while True:\n start_date = input('\\nInput desired start date with format dd-mm-yyyy:\\n')\n try:\n start_date = datetime.strptime(start_date, '%d-%m-%Y')\n break\n except ValueError:\n print('invalid start date selected')\n while True:\n end_date = input('\\nInput desired start date with format dd-mm-yyyy,\\nor hit enter to select todays date\\n')\n if end_date == '':\n end_date = date.today()\n break\n else:\n try:\n end_date = datetime.strptime(end_date, '%d-%m-%Y')\n break\n except ValueError:\n print('invalid end date selected')\n list_of_dates = pd.date_range(start_date, end_date, freq='d')\n list_of_dates = [i.strftime('%d%m%Y') for i in list_of_dates]\n return list_of_dates", "def mast_query(instrument, templates, start_date, end_date, aperture=None, detector=None, filter_name=None,\n pupil=None, grating=None, readpattern=None, lamp=None):\n\n # If a single template name is input as a string, put it in a list\n if isinstance(templates, str):\n templates = [templates]\n\n # Make sure instrument is correct case\n instrument = JWST_INSTRUMENT_NAMES_MIXEDCASE[instrument.lower()]\n\n # monitor_mast.instrument_inventory does not allow list inputs to\n # the added_filters input (or at least if you do provide a list, then\n # it becomes a nested list when it sends the query to MAST. 
The\n # nested list is subsequently ignored by MAST.)\n # So query once for each flat template, and combine outputs into a\n # single list.\n query_results = []\n for template_name in templates:\n\n # Create dictionary of parameters to add\n parameters = {\"date_obs_mjd\": {\"min\": start_date, \"max\": end_date},\n \"exp_type\": template_name}\n\n if detector is not None:\n parameters[\"detector\"] = detector\n if aperture is not None:\n parameters[\"apername\"] = aperture\n if filter_name is not None:\n parameters[\"filter\"] = filter_name\n if pupil is not None:\n parameters[\"pupil\"] = pupil\n if grating is not None:\n parameters[\"grating\"] = grating\n if readpattern is not None:\n parameters[\"readpatt\"] = readpattern\n if lamp is not None:\n parameters[\"lamp\"] = lamp\n\n query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS,\n add_filters=parameters, return_data=True, caom=False)\n if len(query['data']) > 0:\n query_results.extend(query['data'])\n\n return query_results", "def date(start):\n \"\"\"for all dates greater than and equal to the start date.\"\"\"\n # Create a link to the session\n session = Session(engine)\n \n # Get the start and end date of the data\n final_date = session.query(Measurements.date).order_by(Measurements.date.desc()).first()[0]\n first_date = session.query(Measurements.date).order_by(Measurements.date.asc()).first()[0]\n \n # Make sure date is in range of the available data\n if (start > final_date) or (start < first_date):\n return f\"{start} is not a proper date.</br>Try dates between {first_date} - {final_date}\"\n\n # Query the min, avg, and max temps for the given timeframe\n results = []\n while start <= final_date:\n min_temp = session.query(func.min(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n avg_temp = session.query(func.avg(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n max_temp = session.query(func.max(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n \n # Store the information retrieved\n results.append([start, min_temp, avg_temp, max_temp])\n \n # Update the date to check the next record\n date1 = start.split(\"-\")\n date1 = dt.date(int(date1[0]), int(date1[1]), int(date1[2])) + dt.timedelta(days=1)\n start = date1.strftime(\"%Y-%m-%d\")\n\n session.close()\n\n # Create a dictionary from the query results\n date_temps = []\n for date, min_temp, avg_temp, max_temp in results:\n date_temps_dict = {}\n date_temps_dict[\"date\"] = date\n date_temps_dict[\"min_temp\"] = min_temp\n date_temps_dict[\"avg_temp\"] = round(avg_temp, 2)\n date_temps_dict[\"max_temp\"] = max_temp\n date_temps.append(date_temps_dict)\n \n return jsonify(date_temps)", "def get_series(self, series_code: str, date: datetime):\n\n raise NotImplementedError", "def new_instrument(self, instrument_type):\r\n return self.instrument_list[instrument_type](instrument_type,\r\n self.midi_output)", "def find_end_year(self, der_list):\n project_start_year = self.start_year\n user_given_end_year = self.end_year\n # (1) User-defined (this should continue to be default)\n if self.horizon_mode == 1:\n self.end_year = user_given_end_year\n # (2) Auto-calculate based on shortest equipment lifetime. 
(No size optimization)\n if self.horizon_mode == 2:\n shortest_lifetime = 1000 # no technology should last 1000 years -- so this is safe to hardcode\n for der_instance in der_list:\n shortest_lifetime = min(der_instance.expected_lifetime, shortest_lifetime)\n if der_instance.being_sized():\n TellUser.error(\"Analysis horizon mode == 'Auto-calculate based on shortest equipment lifetime', DER-VET will not size any DERs \" +\n f\"when this horizon mode is selected. {der_instance.name} is being sized. Please resolve and rerun.\")\n self.end_year = pd.Period(year=0, freq='y') # cannot preform size optimization with mode==2\n self.end_year = project_start_year + shortest_lifetime-1\n # (3) Auto-calculate based on longest equipment lifetime. (No size optimization)\n if self.horizon_mode == 3:\n longest_lifetime = 0\n for der_instance in der_list:\n if der_instance.technology_type != 'Load':\n longest_lifetime = max(der_instance.expected_lifetime, longest_lifetime)\n if der_instance.being_sized():\n TellUser.error(\"Analysis horizon mode == 'Auto-calculate based on longest equipment lifetime', DER-VET will not size any DERs \" +\n f\"when this horizon mode is selected. {der_instance.name} is being sized. Please resolve and rerun.\")\n self.end_year = pd.Period(year=0, freq='y') # cannot preform size optimization with mode==3\n self.end_year = project_start_year + longest_lifetime-1\n return self.end_year", "def get_simulate_date(start, end):\n start_year, start_month, start_day = parse_string_datetime(start)\n end_year, end_month, end_day = parse_string_datetime(end)\n if simulatedate_checkinput(start, end) == 0:\n start_time = datetime.datetime(start_year, start_month, start_day)\n end_time = datetime.datetime(end_year, end_month, end_day)\n return start_time, end_time", "def dates_to_fits(date_begin: astropy.time.Time, date_end: astropy.time.Time) -> dict[str, Any]:\n cards: dict[str, Any] = {}\n if date_begin is None and date_end is None:\n # no date headers can be written\n return cards\n\n cards[\"TIMESYS\"] = \"TAI\"\n\n date_avg = None\n if date_begin is not None and date_end is not None:\n date_avg = date_begin + (date_end - date_begin) / 2.0\n\n for fragment, date in ((\"OBS\", date_begin), (\"BEG\", date_begin), (\"END\", date_end), (\"AVG\", date_avg)):\n if date is not None:\n tai = date.tai\n cards[f\"DATE-{fragment}\"] = tai.isot\n cards[f\"MJD-{fragment}\"] = tai.mjd\n\n return cards" ]
[ "0.7161972", "0.5350023", "0.5343824", "0.52754986", "0.52716273", "0.5243731", "0.5240982", "0.5149819", "0.5139513", "0.50795436", "0.50315154", "0.5018969", "0.5007179", "0.50046945", "0.49793446", "0.4966744", "0.49542493", "0.4934821", "0.4922149", "0.49216446", "0.48913866", "0.4866242", "0.48629916", "0.4860951", "0.48603606", "0.4838361", "0.48369712", "0.481251", "0.4802354", "0.48008588", "0.4794218", "0.47844875", "0.47830176", "0.47770262", "0.47699264", "0.47678912", "0.47605434", "0.47454262", "0.4740615", "0.47350812", "0.47230977", "0.47206354", "0.47178286", "0.47139138", "0.4712416", "0.47078493", "0.47061035", "0.47020453", "0.46969116", "0.46921727", "0.46911252", "0.46886572", "0.46808964", "0.46692392", "0.4663614", "0.46508518", "0.46462092", "0.46423683", "0.46411735", "0.46381903", "0.46340704", "0.46311796", "0.46309233", "0.46303415", "0.46250993", "0.46175972", "0.46082073", "0.4607425", "0.46041143", "0.46014217", "0.46012598", "0.46006688", "0.45942461", "0.45879507", "0.45865238", "0.45865238", "0.4585892", "0.45850256", "0.45811656", "0.45806456", "0.45792583", "0.4578244", "0.45776954", "0.4566693", "0.45661366", "0.45581758", "0.45568526", "0.4554413", "0.45531827", "0.4547619", "0.45426854", "0.4541635", "0.45384282", "0.45375913", "0.45373362", "0.45294493", "0.4528832", "0.4527746", "0.452367", "0.4518177" ]
0.68636316
1
A function that creates observational JSON output files given start and end dates by extracting data from the GOES database. Only works with GOES instruments.
def database_extraction(mod_start_time,mod_end_time,instrument_chosen,subevent_bool, detect_previous_event = False,thresholds='100,1', one_thresh = False): obs_file_created = False #extending time window window_end_time = (mod_end_time + timedelta(days=2)) window_start_time = (mod_start_time - timedelta(days=2)) #making a list of all dates within window day_list=[] for d in range(10): day_list.append((window_start_time + timedelta(days=d)).date()) print('day list = %s' %day_list) print('determining if an instrument has been chosen') if instrument_chosen: #if an instrument has been chosen, checking to make sure it still works for this date if inst_end < window_end_time: instrument_chosen = False else: #if insturment hasn't been chosen, figuring out what it should be for given date try: #if instrument is specified in cfg using that instrument = cfg.instrument inst_end = datetime.today() print('using %s as our instrument for observations' %instrument) instrument_chosen = True except: #choosing instrument using function if not given in cfg instrument_stuff = choose_prime_inst(window_start_time.date(), window_end_time.date()) instrument = instrument_stuff[0] #figuring out how long we can use this instrument inst_end = instrument_stuff[1] instrument_chosen = True #running katie's code to extract data using chosen instrument and dates print('extracting data from GOES website') #running for only one threshold if one_thresh is true, otherwise running for default #thresholds as well as any additional threshold given if one_thresh: one_sep.run_all(str(window_start_time), str(window_end_time), str(instrument), 'integral', '', '', True, detect_previous_event, thresholds) print('ran for threshold %s' %thresholds) else: if subevent_bool: thresholds = '10,1' #if event is a subevent, changing the threshold in katie's code to #10 MeV > 1pfu so that it will be recorded print('********************SUBEVENT**************************') sep.run_all(str(window_start_time), str(window_end_time), str(instrument), 'integral', '', '', True, detect_previous_event, thresholds) print('ran for subevent') else: #if an event, running with usual thresholds print('********************EVENT*****************************') sep.run_all(str(window_start_time), str(window_end_time),str(instrument), 'integral', '', '', True, detect_previous_event, thresholds) #reloading function so it doesn't keep old data reload(sep) #reformatting csv created from katie's code to json print('extracted - reformatting') for day in day_list: if not obs_file_created: #checking each day within the window to find the csv file if it hasn't #already been found print('thresholds: %s' %thresholds) if one_thresh: #name includes threshold if only ran for one threshold new_obs_name = ('sep_values_' + str(instrument) + '_integral_gt' + str(thresholds).split(',')[0] + '_' + str(thresholds).split(',')[1] + 'pfu_' + day.strftime('%Y_%m_%d').replace('_0','_') + '.csv') else: #otherwise only includes date ran for new_obs_name = ('sep_values_' + str(instrument) + '_integral_' + day.strftime('%Y_%m_%d').replace('_0','_') + '.csv') print('new_os_name %s' %new_obs_name) #checking if that file exists if os.path.exists(katies_path / new_obs_name): #if a file with this date exists, creating the corresponding json file #json name if one_thresh: obs_name = (str(instrument) + '_' + str(day) + 'only_' + str(thresholds).split(',')[0] + 'MeV_event.json') else: obs_name = (str(instrument) + '_' + str(day) + '.json') #creating json file obs_csv2json((katies_path / new_obs_name), 
obs_name, (ref_path/'example_sepscoreboard_json_file_v20190228.json'), instrument) print('obs file created') #file is created - will not run for anymore dates within window obs_file_created = True return(obs_name) else: print('no csv file found with this date, checking next one')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n parser = argparse.ArgumentParser(description='Process input file and save to output file.')\n\n parser.add_argument('-i',\n '--input',\n help='Trip report file (txt file) to process.',\n action='store')\n\n parser.add_argument('-o',\n '--output',\n help='Output file (json).',\n action='store')\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n\n if not args.input:\n print('Please specify the trip report input text file.')\n sys.exit(1)\n\n if args.output:\n output_file = args.output\n else:\n output_file = OUTPUT_DEFAULT\n print(f'No output file specified. Using the default: {OUTPUT_DEFAULT}')\n\n trip_list = [] # the list of trips(dictionaries) to output to the data file\n trip_started = False\n day_started = False\n\n with open(args.input, 'r') as fh:\n\n while True:\n\n line = fh.readline()\n\n if not line:\n break\n\n if line.isspace():\n continue\n\n if '________' in line:\n continue\n\n if line.startswith('TRIP'): # detect start of a trip (anywhere from 1-5 days long)\n # print('Start trip' + line)\n\n trip = Parser.trip_parser(line) # create a trip(dictionary)\n trip['days'] = [] # adds a day list (a list of sectors(dictionaries)) to the trip list\n\n trip_started = True # sets trip started to true\n continue\n\n # if new day has not started, create a new day\n if trip_started and not day_started and Parser.new_day(line):\n day = {'sign_on': line[43:48], 'day_sectors': []}\n day_started = True # sets day started to true\n\n if trip_started and day_started:\n if not Parser.end_day(line):\n # print('During day: ' + line)\n day['day_number'] = line[24:26].strip() # assign a day number to that day\n\n if Parser.in_sector(line): # check if a sector has started\n day['day_sectors'].append(Parser.sector_parser(line)) # append sector to day\n\n else:\n day['sign_off'] = line[53:58].strip() # get sign off time from line\n day['flight_duty_period'] = line[71:76].strip() # get flight duty period\n flight_duty_split = day['flight_duty_period'].split('h') # split flight duty period on 'h'\n day['flight_duty_period_hours'] = int(flight_duty_split[0]) # convert to hours\n day['flight_duty_period_minutes'] = int(flight_duty_split[1]) # convert to minutes\n\n day_started = False # sets day started to false\n\n if not day_started:\n if 'Sign_off' in line: # indicated the day is finished and its only a single day trip\n day['lay_over'] = '0h00' # hard coded 0h00 layover as this is return flight from home base\n day['lay_over_hours'] = 0 # hard coded 0 hours\n day['lay_over_minutes'] = 0 # hard coded 0 minutes\n\n # order the day using an OrderedDict, before adding it to the trip dict\n day_ordered = Parser.order_day(day)\n trip['days'].append(day_ordered)\n\n elif '--------------------------------' in line: # the day is over and now layover\n lay_over = line[88:93].strip() # get layover from line\n day['lay_over'] = lay_over # add to day dictionary\n day['lay_over_hours'] = Parser.layover_split(lay_over)[0] # split and convert to int\n day['lay_over_minutes'] = Parser.layover_split(lay_over)[1] # split and convert to int\n\n # order the day using an OrderedDict, before adding it to the trip dict\n day_ordered = Parser.order_day(day)\n trip['days'].append(day_ordered)\n\n if not line[28:36].isspace() and line[27:35] == 'Sign_off': # detect end of a trip\n trip_started = False # set trip started to False\n\n trip_list.append(trip) # append the trip(dictionary) to the trip list\n\n with open(output_file, 'w') as fh: # Convert everything 
(including datetime object) to string\n fh.write(json.dumps(trip_list, default=str, indent=4)) # write to json file for output\n\n Display.display_data(output_file) # display analytics data within the terminal\n\n return output_file", "def convert_xml_to_json(start_date_str, end_date_str, cache, clean_up=False):\r\n # Establish daterange\r\n sdate = datetime.strptime(start_date_str, \"%Y/%m/%d\")\r\n edate = datetime.strptime(end_date_str, \"%Y/%m/%d\")\r\n date_str_list = [datetime.strftime(i,\"%Y%m%d\") for i in pd.date_range(sdate,edate)]\r\n\r\n # Find only a subset of XML files in the given daterange\r\n xml_subset_nested = [glob.glob(os.path.join(cache, \"NEMPriceSetter_{}*.xml\".format(i))) for i in date_str_list]\r\n xml_subset = [item for sublist in xml_subset_nested for item in sublist]\r\n xml_files = glob.glob(os.path.join(cache, \"NEMPriceSetter_*.xml\"))\r\n print(\"Converting selected {} XML files to JSON, of {} cached files\".format(len(xml_subset),len(xml_files)))\r\n print(xml_subset)\r\n # Read XML files and convert to JSON\r\n for filename in tqdm(xml_subset):\r\n handle = open(filename, 'r')\r\n content = handle.read()\r\n d = xmltodict.parse(content)\r\n write_file = filename.replace(\".xml\", \".json\")\r\n json_path = write_file\r\n with open(json_path, 'w') as fp:\r\n json.dump(d['SolutionAnalysis']['PriceSetting'], fp)\r\n\r\n # Remove XML files if clean_up\r\n if clean_up:\r\n print(\"Clearing {} XML files from cache\".format(len(xml_files)))\r\n for filename in xml_files:\r\n os.remove(os.path.join(cache, filename))", "def export_db_to_json(self, out_json_file):\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n eodd_utils = eodatadown.eodatadownutils.EODataDownUtils()\n\n query_result = ses.query(EDDSentinel1ASF).all()\n db_scn_dict = dict()\n for scn in query_result:\n db_scn_dict[scn.PID] = dict()\n db_scn_dict[scn.PID]['PID'] = scn.PID\n db_scn_dict[scn.PID]['Scene_ID'] = scn.Scene_ID\n db_scn_dict[scn.PID]['Product_Name'] = scn.Product_Name\n db_scn_dict[scn.PID]['Product_File_ID'] = scn.Product_File_ID\n db_scn_dict[scn.PID]['ABS_Orbit'] = scn.ABS_Orbit\n db_scn_dict[scn.PID]['Rel_Orbit'] = scn.Rel_Orbit\n db_scn_dict[scn.PID]['Doppler'] = scn.Doppler\n db_scn_dict[scn.PID]['Flight_Direction'] = scn.Flight_Direction\n db_scn_dict[scn.PID]['Granule_Name'] = scn.Granule_Name\n db_scn_dict[scn.PID]['Granule_Type'] = scn.Granule_Type\n db_scn_dict[scn.PID]['Incidence_Angle'] = scn.Incidence_Angle\n db_scn_dict[scn.PID]['Look_Direction'] = scn.Look_Direction\n db_scn_dict[scn.PID]['Platform'] = scn.Platform\n db_scn_dict[scn.PID]['Polarization'] = scn.Polarization\n db_scn_dict[scn.PID]['Process_Date'] = eodd_utils.getDateTimeAsString(scn.Process_Date)\n db_scn_dict[scn.PID]['Process_Description'] = scn.Process_Description\n db_scn_dict[scn.PID]['Process_Level'] = scn.Process_Level\n db_scn_dict[scn.PID]['Process_Type'] = scn.Process_Type\n db_scn_dict[scn.PID]['Process_Type_Disp'] = scn.Process_Type_Disp\n db_scn_dict[scn.PID]['Acquisition_Date'] = eodd_utils.getDateTimeAsString(scn.Acquisition_Date)\n db_scn_dict[scn.PID]['Sensor'] = scn.Sensor\n db_scn_dict[scn.PID]['BeginPosition'] = eodd_utils.getDateTimeAsString(scn.BeginPosition)\n db_scn_dict[scn.PID]['EndPosition'] = eodd_utils.getDateTimeAsString(scn.EndPosition)\n db_scn_dict[scn.PID]['North_Lat'] = scn.North_Lat\n db_scn_dict[scn.PID]['South_Lat'] = scn.South_Lat\n 
db_scn_dict[scn.PID]['East_Lon'] = scn.East_Lon\n db_scn_dict[scn.PID]['West_Lon'] = scn.West_Lon\n db_scn_dict[scn.PID]['Remote_URL'] = scn.Remote_URL\n db_scn_dict[scn.PID]['Remote_FileName'] = scn.Remote_FileName\n db_scn_dict[scn.PID]['Remote_URL_MD5'] = scn.Remote_URL_MD5\n db_scn_dict[scn.PID]['Total_Size'] = scn.Total_Size\n db_scn_dict[scn.PID]['Query_Date'] = eodd_utils.getDateTimeAsString(scn.Query_Date)\n db_scn_dict[scn.PID]['Download_Start_Date'] = eodd_utils.getDateTimeAsString(scn.Download_Start_Date)\n db_scn_dict[scn.PID]['Download_End_Date'] = eodd_utils.getDateTimeAsString(scn.Download_End_Date)\n db_scn_dict[scn.PID]['Downloaded'] = scn.Downloaded\n db_scn_dict[scn.PID]['Download_Path'] = scn.Download_Path\n db_scn_dict[scn.PID]['Archived'] = scn.Archived\n db_scn_dict[scn.PID]['ARDProduct_Start_Date'] = eodd_utils.getDateTimeAsString(scn.ARDProduct_Start_Date)\n db_scn_dict[scn.PID]['ARDProduct_End_Date'] = eodd_utils.getDateTimeAsString(scn.ARDProduct_End_Date)\n db_scn_dict[scn.PID]['ARDProduct'] = scn.ARDProduct\n db_scn_dict[scn.PID]['ARDProduct_Path'] = scn.ARDProduct_Path\n db_scn_dict[scn.PID]['DCLoaded_Start_Date'] = eodd_utils.getDateTimeAsString(scn.DCLoaded_Start_Date)\n db_scn_dict[scn.PID]['DCLoaded_End_Date'] = eodd_utils.getDateTimeAsString(scn.DCLoaded_End_Date)\n db_scn_dict[scn.PID]['DCLoaded'] = scn.DCLoaded\n db_scn_dict[scn.PID]['Invalid'] = scn.Invalid\n db_scn_dict[scn.PID]['ExtendedInfo'] = scn.ExtendedInfo\n db_scn_dict[scn.PID]['RegCheck'] = scn.RegCheck\n ses.close()\n\n db_plgin_dict = dict()\n if self.calc_scn_usr_analysis():\n plugin_keys = self.get_usr_analysis_keys()\n for plgin_key in plugin_keys:\n query_result = ses.query(EDDSentinel1ASFPlugins).filter(EDDSentinel1ASFPlugins.PlugInName == plgin_key).all()\n db_plgin_dict[plgin_key] = dict()\n for scn in query_result:\n db_plgin_dict[plgin_key][scn.Scene_PID] = dict()\n db_plgin_dict[plgin_key][scn.Scene_PID]['Scene_PID'] = scn.Scene_PID\n db_plgin_dict[plgin_key][scn.Scene_PID]['PlugInName'] = scn.PlugInName\n db_plgin_dict[plgin_key][scn.Scene_PID]['Start_Date'] = eodd_utils.getDateTimeAsString(\n scn.Start_Date)\n db_plgin_dict[plgin_key][scn.Scene_PID]['End_Date'] = eodd_utils.getDateTimeAsString(scn.End_Date)\n db_plgin_dict[plgin_key][scn.Scene_PID]['Completed'] = scn.Completed\n db_plgin_dict[plgin_key][scn.Scene_PID]['Success'] = scn.Success\n db_plgin_dict[plgin_key][scn.Scene_PID]['Outputs'] = scn.Outputs\n db_plgin_dict[plgin_key][scn.Scene_PID]['Error'] = scn.Error\n db_plgin_dict[plgin_key][scn.Scene_PID]['ExtendedInfo'] = scn.ExtendedInfo\n ses.close()\n\n fnl_out_dict = dict()\n fnl_out_dict['scn_db'] = db_scn_dict\n if db_plgin_dict:\n fnl_out_dict['plgin_db'] = db_plgin_dict\n\n with open(out_json_file, 'w') as outfile:\n json.dump(fnl_out_dict, outfile, indent=4, separators=(',', ': '), ensure_ascii=False)", "def loop_through_dates(in_dir,\n out_dir,\n start_date,\n end_date,\n extent,\n temporal_composite=\"monthly\",\n product=\"all_products\",\n area=\"global-extracted\",\n neodaas_name=False):\n\n start_date_obj = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n end_date_obj = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n current_date = start_date_obj\n\n while current_date < end_date_obj:\n\n if temporal_composite.lower() == \"monthly\":\n str_date = current_date.strftime(\"%Y%m\")\n else:\n str_date = current_date.strftime(\"%Y%m%d\")\n\n file_path = os.path.join(in_dir, temporal_composite, product,\n \"{:02}\".format(current_date.year),\n 
\"*{}*nc\".format(str_date))\n in_netcdfs = glob.glob(file_path)\n\n for in_netcdf in in_netcdfs:\n\n print(\"Extracting {}\".format(in_netcdf))\n out_netcdf_dir = os.path.join(out_dir,\n \"{:02}\".format(current_date.year))\n try:\n os.makedirs(out_netcdf_dir)\n except OSError:\n # If already exists continue\n pass\n\n if neodaas_name:\n output_name = \"OC-CCI_{product}_L4_{area}_{period}_{date}.nc\".format(\n product=product,\n area=area,\n period=TEMPORAL_COMPOSITE_NAMES[temporal_composite],\n date=str_date)\n else:\n output_name = os.path.basename(in_netcdf).replace(\".nc\",\n \"_{}.nc\".format(area))\n out_netcdf_file = os.path.join(out_netcdf_dir, output_name)\n\n if os.path.isfile(out_netcdf_file):\n continue\n\n libsubarea.nk_subarea(in_netcdf, out_netcdf_file,\n [\"lon\", \"lat\"], [extent[0], extent[3]],\n [extent[2], extent[1]])\n\n if temporal_composite.lower() == \"monthly\":\n current_date = current_date + relativedelta.relativedelta(months=1)\n # For the daily, 5day and 8day composite itterate a day at a time so get all composites\n # If not then when starting out of sequence keep missing data.\n else:\n current_date = current_date + relativedelta.relativedelta(days=1)", "def start_end(start_date, end_date):\n if start_date <= '2016-08-23' or start_date > \"2018-01-01\":\n return jsonify({\"error\": f\"Time period {start_date} not found.\"}), 404\n \n if end_date <= '2016-08-23' or end_date > \"2018-01-01\":\n return jsonify({\"error\": f\"Time period {end_date} not found.\"}), 404\n \n if end_date < start_date:\n return jsonify({\"error\": f\"Time period {end_date} comes before {start_date}.\"}), 404\n \n session = Session(engine)\n\n \"\"\"Return the tobs data\"\"\"\n # Query all tobs data\n station = pd.DataFrame(session.query(Measurement.id, Measurement.station, Measurement.date, Measurement.prcp, Measurement.tobs).all(), columns=['id', 'station', 'date', 'prcp', 'tobs'])\n\n session.close()\n \n station_hist = station[station[\"date\"] >= start_date]\n station_hist = station[station[\"date\"] <= end_date]\n station_hist = station_hist[station_hist[\"station\"] == 'USC00519281']\n low_temp = station_hist[\"tobs\"].min()\n max_temp = station_hist[\"tobs\"].max()\n avg_temp = station_hist[\"tobs\"].mean()\n temp_dict = {\"Low Temp\": low_temp, \"Max Temp\": max_temp, \"Average Temp\": avg_temp}\n return jsonify(temp_dict)", "def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n 
\"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")", "def start_end(start_date,end_date):\n\n session = Session(engine)\n\n # Query from database full temp results for dates range\n temp_results = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n filter(measurement.date >= start_date).\\\n filter(measurement.date <= end_date).all()\n \n session.close() \n \n return jsonify(temp_results)", "def startend(start,end):\n \n All_temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n \n session.close()\n\n return jsonify(All_temps)\n\n return jsonify({\"error\": f\"No Data for selected start date.\"}), 404", "def main():\n parser = argparse.ArgumentParser(description='Saves MtGox trades for a time period')\n parser.add_argument('-s','--start', help='The start date in ' + input_dateformat + 'format', required=True)\n parser.add_argument('-e','--end', help='The end date'+ input_dateformat + 'format', required=True)\n args = vars(parser.parse_args())\n start=get_unixtime(args['start'], input_dateformat)\n end=get_unixtime(args['end'], input_dateformat)\n if end < start:\n print \"End timestamp must be later than start timestamp. Exiting\"\n sys.exit()\n print \"Will get trades from \", start, \"to\", end\n\n \"\"\" read the output file and adjust the start date, if it exists\n \"\"\"\n try:\n with open(outfile_name, \"r\") as in_file:\n goxdata = in_file.readlines() \n saved_start=get_unixtime(goxdata[0].split(\",\")[0], input_dateformat)\n saved_end=get_unixtime(goxdata[len(goxdata)-1].split(\",\")[0], input_dateformat)\n\n print \"File found, with start date:\", saved_start, \"and end date\", saved_end\n if start < saved_end:\n print \"Adjusted start time from \", start, \"to \", saved_end\n start = saved_end\n except IOError:\n print \"Output file not found. Will create a new one.\"\n\n \"\"\" get data from MtGox in chunks\n \"\"\"\n try:\n currstart = start\n endreached = False\n while endreached == False:\n # populate the trades dictionary with the next batch of data\n data = fetch_data(currstart)\n print \"Fetching data\", currstart\n if (data == '[]'):\n break \n trades = [mtgox_trade(a) for a in json.loads(data)]\n currstart = trades[-1].timestamp\n\n if trades[-1].timestamp > end:\n endreached = True\n\n # place trades into the out_file before getting the next batch from MtGox \n # so that if the program gets interrupt you have saved the trades obtained so far\n with open(outfile_name, \"a\") as out_file:\n for item in trades:\n # when you request data from a timestamp gox truncates your start time to seconds and then\n # send you everything including the initial second. 
So you must filter here trades\n # of the start_time second that are already in the database.\n if item.timestamp > start and item.timestamp < end:\n out_file.write(item.trade_to_string()+\"\\n\")\n\n except urllib2.HTTPError, e:\n print \"Error:\", str(e.code), str(e.reason)\n return\n except urllib2.URLError, e:\n print \"Error:\", e\n return", "def ingest_data(args):\n fetchopts = {\n \"fixtures\": FIXTURES,\n \"startyear\": args.start_year or fetch.STARTYEAR,\n \"endyear\": args.end_year or fetch.ENDYEAR\n }\n\n folder, num_series = fetch.fetch_all(**fetchopts)\n\n fcsv, num_rows = wrangle.wrangle_csv()\n fjson, _ = wrangle.wrangle_json()\n\n return (\n \"Ingested %i rows in %i time series to %s\\n\"\n \"Wrote JSON data to %s\\n\"\n \"Wrote CSV data to %s\"\n ) % (num_rows, num_series, folder, fcsv, fjson)", "def main():\n\n # parses arguments\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', action='store', dest='start_index', type=int,\n help='The starting index for events. Default is 0')\n\n parser.add_argument('-e', action='store', dest='end_index', type=int,\n help='The starting index for events. Default is 5,000')\n\n results = parser.parse_args()\n\n start_index = results.start_index or 0\n\n end_index = results.end_index or 5000\n\n scraper = Scraper()\n\n # these are the event column titles from the sample import csv given by localist\n event_column_titles = [\n 'Title','Description','Date From','Date To','Recurrence','Start Time','End Time',\n 'Location','Address','City','State','Event Website','Room','Keywords','Tags',\n 'Photo URL','Ticket URL','Cost','Hashtag','Facebook URL','Group','Department',\n 'Allow User Activity','Allow User Attendance','Visibility','Featured Tabs',\n 'Sponsored','Venue Page Only','Exclude From Trending','Event Types','Invited Audience', 'Original URL',\n 'Location Details'\n ]\n\n out_stream = open('event_import.csv', 'w')\n\n writer = Writer(event_column_titles, out_stream)\n\n writer.write_headers()\n\n # iterates through the specified event numbers and scrapes each one and writes\n # it to the output file\n for i in range(start_index, end_index + 1):\n current_url = 'http://test-ucscevents.pantheonsite.io/event/' + str(i)\n print(\"processing url: \" + current_url)\n r = requests.get(current_url)\n if r.status_code != requests.codes.ok:\n print(' 404')\n else:\n soup = get_soup_from_url(current_url)\n events = scraper.scrape_event(soup)\n for event in events:\n event['Original URL'] = current_url\n\n writer.write_object(event) # event written to output file here\n\n out_stream.close()", "def data_gen(path):\n files = glob.glob(path + \"//*.json\")\n final_data = []\n tot_sents = 0\n for file in files:\n if \"processed_data.json\" in file or \"merged_data.json\" in file or \"result.json\" in file:\n print(file)\n continue\n # if \"holiday\" not in file.lower():\n # continue\n if \"merged\" not in file.lower():\n continue\n # if \"holiday\" in file.lower() or \"merged\" in file.lower():\n # continue\n logger.info(\"Processing \" + file)\n data = json.load(open(file, \"r\", encoding=\"UTF-8\"))\n for (i, datai) in enumerate(data):\n try:\n if \"Results\"in datai.keys():\n ents = datai[\"Results\"]\n else:\n # the whole sentence is an entity\n ents = [datai]\n datai[\"Input\"] = datai[\"Text\"]\n\n sent_ents = []\n for (i, ent) in enumerate(ents):\n # make up lost tokens\n if \"Start\" not in ent.keys():\n ent[\"Start\"] = datai[\"Input\"].find(ent[\"Text\"])\n #logger.warning(\"Can't find Start in original, find one according to 
the entity text\")\n if \"Length\" not in ent.keys():\n ent[\"Length\"] = len(ent[\"Text\"])\n #logger.warning(\"Can't find Length in original, find one according to the entity text\")\n\n # extract key information as a entity\n ent_type = ent[\"Type\"] if \"Type\" in ent.keys() else (ent[\"TypeName\"] if \"TypeName\" in ent.keys() else ent[\"Typename\"])\n # convert datetimeV2.* to *\n ent_type = ent_type.split(\".\")[-1] if \"datetimeV2\" in ent_type else ent_type\n # convert ordinal.* to ordinal\n ent_type = ent_type.split(\".\")[0] if \"ordinal\" in ent_type else ent_type\n \n # remove entities with null typename\n if len(ent_type) == 0:\n logger.warning(\"Null entities occured in \"+str(ents))\n continue\n \n # find entity resolutions\n if \"Value\" in ent.keys() and \"Resolution\" not in ent.keys():\n ent[\"Resolution\"] = ent[\"Value\"]\n if \"Resolution\" in ent.keys():\n if type(ent[\"Resolution\"])==str:\n assert ent[\"Resolution\"] == \"not resolved\"\n value = \"\"\n else:\n if \"values\" in ent[\"Resolution\"].keys():\n if \"timex\" in ent[\"Resolution\"][\"values\"][0].keys():\n value = \";\".join([v[\"timex\"] for v in ent[\"Resolution\"][\"values\"]])\n else:\n value = \";\".join([v[\"value\"] for v in ent[\"Resolution\"][\"values\"]])\n elif \"offset\" in ent[\"Resolution\"].keys():\n value = ent[\"Resolution\"][\"offset\"]\n elif \"Timex\" in ent[\"Resolution\"].keys():\n value = ent[\"Resolution\"][\"Timex\"]\n elif \"timex\" in ent[\"Resolution\"].keys():\n value = ent[\"Resolution\"][\"timex\"]\n elif \"TimeZoneResolution\" in ent[\"Resolution\"].keys():\n value = ent[\"Resolution\"][\"TimeZoneResolution\"]\n else:\n value = ent[\"Resolution\"][\"value\"]\n else:\n value = \"\"\n\n # filter out unwanted entitiy types\n # get rid of number from number entities\n if ent_type.lower() not in [\"number\",\"ip\",\"quotedtext\",\"url\"]:\n # keep only number entities\n # if ent_type.lower() in [\"number\"]:\n sent_ents.append({\n \"Text\":\n ent[\"Text\"],\n \"Type\":\n ent_type,\n \"Start\":\n ent[\"Start\"],\n \"End\":\n ent[\"End\"] if \"End\" in ent.keys() else ent[\"Start\"] + ent[\"Length\"] - 1,\n \"Resolution\":\n value,\n })\n except Exception as e:\n # in case of any un-expected formats\n print(file)\n print(datai)\n print(ent)\n print(ent[\"Resolution\"].keys())\n raise e\n # find context\n if \"Context\" in datai.keys():\n context = datai[\"Context\"][\"ReferenceDateTime\"]\n max_len = len(\"2017-09-27T17:25:54\")\n context = context[:max_len]\n else:\n context = \"\"\n final_data.append({\"Text\": datai[\"Input\"], \"Entities\": sent_ents,\"Context\": context})\n tot_sents += len(data)\n assert len(final_data) == tot_sents\n\n # robustness check and get basic statistics about current dataset\n data_analysis(final_data)\n # dump the processed data\n final_data = json.dumps(final_data, indent=4, ensure_ascii=False)\n new_file = open(path + \"//processed_data.json\", \"w+\", encoding=\"UTF-8\")\n new_file.writelines(final_data)", "def dates(start, end):\n \n sel4 = [\n func.min(Measurement.tobs),\n func.max(Measurement.tobs),\n func.avg(Measurement.tobs),]\n\n if end is None: \n start_date = dt.datetime.strptime(start , '%Y-%m-%d')\n temp_analysis = session.query(*sel4).filter(Measurement.date >= start_date).all() \n else\n end_date = dt.datetime.strptime(end , '%Y-%m-%d')\n temp_analysis = session.query(*sel4).filter(Measurement.date.between (start_date, end_date)).all() \n\n# Create a dictionary from the row data and append to a list of all_dates\n all_dates = 
[]\n for Measurement.tobs in temp_analysis:\n date_dict = {}\n date_dict['TMIN'] = func.min(Measurement.tobs)\n date_dict['TMAX'] = func.max(Measurement.tobs)\n date_dict['TAVG'] = func.avg(Measurement.tobs)\n all_dates.append(date_dict)\n\n return jsonify(date_dict)", "def get_ooni_data(range):\n\n last_ooni_report_generated = get_sys_info(request='last_ooni_report_generated', update=True)\n\n configs = get_configs()\n bucket = 'ooni-data-eu-fra'\n \n session = boto3.Session(profile_name=configs['profile'])\n client = session.client('s3')\n \n #get date range\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=range)\n delta = datetime.timedelta(days=1)\n\n logger.debug(f\"Now: {now} Then: {then}\")\n\n engine = db.create_engine(configs['database_url'])\n connection = engine.connect()\n metadata = db.MetaData()\n\n ooni_reports = db.Table('ooni_reports', metadata, autoload=True, autoload_with=engine)\n\n file_list = []\n logger.debug(\"Getting OONI file list from S3...\")\n while then <= now:\n date_str = then.strftime('%Y%m%d')\n file_date = 'raw/' + date_str\n then += delta\n\n date_report_list = client.list_objects_v2(\n Bucket=bucket,\n Prefix=file_date\n )\n\n for s3_file in date_report_list['Contents']:\n if ('webconnectivity' in s3_file['Key']) and ('jsonl' in s3_file['Key']):\n file_list.append(s3_file['Key'])\n\n\n # Process Files\n domain_list, mirror_list = lists()\n\n matching_domain_data = {}\n for domain in domain_list:\n matching_domain_data[domain['name']] = []\n\n for file in file_list:\n file_parts = file.split('/')\n local_name = ('-').join(file_parts)\n local_file_path = configs['local_tmp'] + '/' + local_name\n\n logger.debug(f\"Downloading to: {local_file_path}\")\n with open(local_file_path, 'wb') as file_data:\n client.download_fileobj(bucket, file, file_data)\n\n data = []\n \n with gzip.open(local_file_path) as raw_file:\n line = raw_file.readline()\n json_data = json.loads(line)\n data.append(json_data)\n\n os.remove(local_file_path) \n \n for jdata in data:\n logger.debug(f\"input: {jdata['input']}\")\n domain_name = False\n for domain in domain_list:\n match = site_match(domain['name'], jdata['input'])\n if match:\n domain_name = domain['name']\n domain_id = domain['id']\n if not domain_name:\n logger.debug(\"No match.\")\n continue\n \n date_reported = datetime.datetime.strptime(jdata['measurement_start_time'], '%Y-%m-%d %H:%M:%S')\n matching_domain_data[domain_name] = {\n 'domain_id': domain_id,\n 'url_accessed': jdata['input'],\n 'country': jdata['probe_cc'],\n 'blocked': jdata['test_keys']['blocking'],\n 'dns_consistency': jdata['test_keys']['dns_consistency'],\n 'date_reported': date_reported\n } \n \n for key in jdata['test_keys']['requests']:\n for s_key in key:\n if s_key == 'failure':\n matching_domain_data[domain_name]['failure'] = key['failure']\n\n print(f\"Matching Domain Data for {domain_name}:{matching_domain_data[domain_name]}\")\n # Make report\n ooni_report_data = matching_domain_data[domain_name]\n\n insert = ooni_reports.insert().values(**ooni_report_data)\n result = connection.execute(insert)\n\n return", "def get_events(start_date, end_date, source=utils.get_native_source, **kwargs):\n if not isinstance(source, games.models.Source):\n source = source()\n logger.info(\"getting events from source %s...\", source)\n if not source:\n return []\n # with open('sportmonks/response_texts/fixtures_{}-{}.txt'.format(start_date.strftime('%Y-%m-%d'),\n # end_date.strftime('%Y-%m-%d')), 'w') as outfile:\n # season is necessary so 
that the season object is extracted and used\n include = kwargs.get('include', '')\n include = ','.join([include, 'season']) if include else 'season'\n kwargs['include'] = include\n data, meta, status_code = sportmonks.fixtures.by_date_range(start_date=start_date, end_date=end_date, **kwargs)\n # json.dump(data, outfile, indent=4)\n if not data:\n return []\n pre_events = []\n try:\n num_fetched_objects = len(data)\n except:\n num_fetched_objects = None\n num_processed_objects = 0\n try:\n for obj in data:\n num_processed_objects += 1\n try:\n sid = obj.get('id', None)\n time = obj.get('time', dict())\n starting_at = time.get('starting_at', dict())\n event_datetime = get_date(starting_at, 'date_time')\n # custom_timezone = pytz.timezone('Europe/Athens')\n # event_datetime = event_datetime.astimezone(custom_timezone)\n home_team_sid = obj.get('localteam_id', None)\n away_team_sid = obj.get('visitorteam_id', None)\n competition_season_sid = obj.get('season_id', None)\n season_string = obj.get('season', {}).get('data', {}).get('name')\n stage_sid = obj.get('stage_id', None)\n round_sid = obj.get('round_id', None)\n competition_sid = obj.get('league_id', None)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n\n zak_season_name = games.models.Season.zakandify_season_string(season_string)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n if not season:\n logger.data_error('Could not extract season object from season string: %s', season_string)\n continue\n\n # todo sportmonks fix\n # if the event involves a problematic team it is not created in order to avoid future problems\n if is_in_problematic_teams(home_team_sid):\n home_team_sid = None\n if is_in_problematic_teams(away_team_sid):\n away_team_sid = None\n\n competition_seasons = games.models.CompetitionSeason.by_sid(competition_season_sid, source, season)\n try:\n competition_season = competition_seasons.first() # only one entity exists in the queryset\n except Exception as e:\n logger.warning('%s', e)\n competition_season = None\n\n home_team = games.models.Team.by_sid(home_team_sid, source)\n away_team = games.models.Team.by_sid(away_team_sid, source)\n pre_event = pre_models.PreEvent(source, sid, event_datetime, home_team, away_team, competition_season)\n pre_events.append(pre_event)\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.fixtures.by_date_range %s %s from source %s',\n e, start_date, end_date, source)\n logger.info(\"%s event objects were contained in the response\", num_fetched_objects)\n logger.info(\"%s event objects were processed\", num_processed_objects)\n logger.info(\"%s pre events were created\", len(pre_events))\n return pre_events", "def generate(start_date, episodes, steps, output_file):\n header = ','.join(FIELDS) + '\\n'\n with open(output_file, 'w') as fd:\n fd.write(header)\n data_arrays = []\n first_dp = generate_data_point(start_date)\n data_arrays.append(first_dp)\n\n interval = int(1440/steps)\n cur_ts = increment_ts(start_date, interval)\n\n while step_diff(start_date, cur_ts, interval) < steps*episodes:\n dp_tmp = generate_data_point(cur_ts)\n data_arrays.append(dp_tmp)\n cur_ts = increment_ts(cur_ts, interval)\n\n for dp in data_arrays:\n row = ','.join(dp) + '\\n'\n fd.write(row)", "def net_export(\n self,\n timezone: str = \"\",\n start_date_range: str = None,\n end_date_range: str = None,\n **_,\n ):\n outputs: dict = {}\n\n if self.AGG_BY == \"zone\":\n agg = \"zone\"\n else:\n agg = \"region\"\n\n # List of properties needed by the 
plot, properties are a set of tuples and\n # contain 3 parts: required True/False, property name and scenarios required,\n # scenarios must be a list.\n properties = [(True, f\"{agg}_Net_Interchange\", self.Scenarios)]\n\n # Runs get_formatted_data within PlotDataStoreAndProcessor to populate PlotDataStoreAndProcessor dictionary\n # with all required properties, returns a 1 if required data is missing\n check_input_data = self.get_formatted_data(properties)\n\n if 1 in check_input_data:\n return MissingInputData()\n\n for zone_input in self.Zones:\n logger.info(f\"{self.AGG_BY} = {zone_input}\")\n net_export_chunks = []\n\n for scenario in self.Scenarios:\n\n logger.info(f\"Scenario = {scenario}\")\n net_export_read = self[f\"{agg}_Net_Interchange\"].get(scenario)\n if shift_leapday:\n net_export_read = adjust_for_leapday(net_export_read)\n\n net_export = net_export_read.xs(zone_input, level=self.AGG_BY)\n net_export = net_export.groupby(\"timestamp\").sum()\n net_export.columns = [scenario]\n\n if pd.notna(start_date_range):\n net_export = set_timestamp_date_range(\n net_export, start_date_range, end_date_range\n )\n if net_export.empty is True:\n logger.warning(\"No data in selected Date Range\")\n continue\n\n net_export_chunks.append(net_export)\n\n net_export_all_scenarios = pd.concat(net_export_chunks, axis=1)\n\n unitconversion = self.capacity_energy_unitconversion(\n net_export_all_scenarios, self.Scenarios\n )\n net_export_all_scenarios = (\n net_export_all_scenarios / unitconversion[\"divisor\"]\n )\n # Data table of values to return to main program\n data_table_out = net_export_all_scenarios.add_suffix(\n f\" ({unitconversion['units']})\"\n )\n\n # Make scenario/color dictionary.\n color_dict = dict(zip(net_export_all_scenarios.columns, self.color_list))\n\n mplt = PlotLibrary()\n fig, ax = mplt.get_figure()\n plt.subplots_adjust(wspace=0.05, hspace=0.2)\n\n if net_export_all_scenarios.empty:\n out = MissingZoneData()\n outputs[zone_input] = out\n continue\n\n for column in net_export_all_scenarios:\n mplt.lineplot(\n net_export_all_scenarios, column, color_dict, label=column\n )\n ax.set_ylabel(\n f'Net exports ({unitconversion[\"units\"]})',\n color=\"black\",\n rotation=\"vertical\",\n )\n ax.set_xlabel(timezone, color=\"black\", rotation=\"horizontal\")\n ax.margins(x=0.01)\n ax.axhline(y=0, linestyle=\":\", color=\"gray\")\n mplt.set_subplot_timeseries_format()\n\n mplt.add_legend(reverse_legend=True)\n if plot_data_settings[\"plot_title_as_region\"]:\n mplt.add_main_title(zone_input)\n\n outputs[zone_input] = {\"fig\": fig, \"data_table\": data_table_out}\n return outputs", "def create_scn_date_imgs(self, start_date, end_date, img_size, out_img_dir, img_format, vec_file, vec_lyr, tmp_dir, order_desc=True):\n out_img_ext = 'png'\n if img_format.upper() == 'PNG':\n out_img_ext = 'png'\n elif img_format.upper() == 'JPEG':\n out_img_ext = 'jpg'\n elif img_format.upper() == 'GTIFF':\n out_img_ext = 'tif'\n else:\n raise EODataDownException(\"The input image format ({}) was recognised\".format(img_format))\n eoddutils = eodatadown.eodatadownutils.EODataDownUtils()\n scn_dates = self.find_unique_scn_dates(start_date, end_date, valid=True, order_desc=order_desc)\n scn_qklks = dict()\n for scn_date in scn_dates:\n print(\"Processing {}:\".format(scn_date[0].strftime('%Y-%m-%d')))\n scns = self.get_scns_for_date(scn_date[0])\n scn_files = []\n for scn in scns:\n ard_file = eoddutils.findFile(scn.ARDProduct_Path, \"*dB*.tif\")\n print(\"\\t{}: {} - {}\".format(scn.PID, scn.Scene_ID, 
ard_file))\n scn_files.append(ard_file)\n\n # VV, VH, VV/VH\n bands = '1,2,3'\n\n scn_date_str = scn_date[0].strftime('%Y%m%d')\n quicklook_img = os.path.join(out_img_dir, \"sen1_qklk_{}.{}\".format(scn_date_str, out_img_ext))\n import rsgislib.tools.visualisation\n rsgislib.tools.visualisation.createQuicklookOverviewImgsVecOverlay(scn_files, bands, tmp_dir,\n vec_file, vec_lyr,\n outputImgs=quicklook_img,\n output_img_sizes=img_size,\n gdalformat=img_format,\n scale_axis='auto',\n stretch_file=self.std_vis_img_stch,\n overlay_clr=[255, 255, 255])\n scn_qklks[scn_date_str] = dict()\n scn_qklks[scn_date_str]['qkimage'] = quicklook_img\n scn_qklks[scn_date_str]['scn_date'] = scn_date[0]\n return scn_qklks", "def download_json_files(ori, des, date, hredep, args):\n\n print(\"======================================\")\n print(\"= Extracting JSON files from OTP API =\")\n print(\"======================================\")\n i = 0\n for id in ori.keys(): # just so we can get all the ids (could have been des.keys() or hredep.keys())\n if download_json:\n # don't retrieve the data from OTP API if the user specifies it\n url = build_url(ori[id]['orilon'], ori[id]['orilat'],\n des[id]['deslon'], des[id]['deslat'],\n date[id]['year'], date[id]['month'], date[id]['day'],\n hredep[id]['hour'], hredep[id]['minute'],\n args[id])\n try :\n extract_json(url, id, make_dir(json_output))\n except OSError:\n print(\"ERROR : OTP is not currently running on the given port\")\n exit();\n\n printrp('( ' + str(i) + ' / ' + str(len(ori)) + ' )') if found_CmdPrinter else print(i)\n i += 1\n if download_json:\n print('( ' + str(i) + ' / ' + str(len(ori)) + ' )')\n else:\n print(\"Already downloaded\")", "def build():\n for root, dirs, files in os.walk(IN_PATH):\n for filename in files:\n if filename.endswith('.csv'):\n with open(os.path.join(IN_PATH, filename), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n path = row[0].split('.')\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = row[1]\n else:\n d = d[path[i]]\n with open (os.path.join(OUT_PATH, filename.replace('.csv', '.json')), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open (os.path.join(WEB_PATH, filename.replace('.csv', '.js')), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))\n\n with open(os.path.join(IN_PATH, 'en_US.csv'), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n path = row[0].split('.')\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = zz_string(row[1], row[0])\n else:\n d = d[path[i]]\n with open(os.path.join(OUT_PATH, 'zz_ZZ.json'), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open(os.path.join(WEB_PATH, 'zz_ZZ.js'), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))", "def start_end(start_date, end_date):\n print(\"server received request for tobs stats start date to end date...\")\n # correct for dates before the start of our data\n if start_date < '2010-01-01':\n start_date = '2010-01-01'\n # correct for dates beyond the 
end of our data\n if end_date > '2017-08-23':\n end_date = '2017-08-23'\n range_df = temps_df[(temps_df['date'] >= start_date) & (temps_df['date'] <= end_date)]\n lowest = range_df['tobs'].min()\n highest = range_df['tobs'].max()\n average = range_df['tobs'].mean()\n output = {'TMIN': lowest, 'TMAX': highest, 'TAVG': average}\n return jsonify(output)", "def generate_json_network_reports(init_date, last_date):\n\n report = network_report_for_carrier(init_date, last_date)\n\n save_json_report_to_file(report, init_date.year, init_date.month,\n \"network_report_\")", "def obs_csv2json(input_file,output_file,example_path,instrument):\r\n\r\n obs_path = Path(cfg.obs_path)\r\n \r\n with open(example_path,'r') as e:\r\n example = js.load(e)\r\n \r\n #deleting unused categories\r\n del(example['sep_forecast_submission']['forecasts'])\r\n del(example['sep_forecast_submission']['triggers'][2])\r\n del(example['sep_forecast_submission']['triggers'][1])\r\n del(example['sep_forecast_submission']['triggers'][0])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])\r\n del(example['sep_forecast_submission']['contacts'])\r\n del(example['sep_forecast_submission']['model'])\r\n del(example['sep_forecast_submission']['issue_time'])\r\n \r\n example['sep_forecast_submission']['mode'] = 'observation'\r\n\r\n #json template for observations\r\n obs_json = example\r\n\r\n fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',\r\n 'peak_time','rise_time','end_time','duration','fluence>10',\r\n 'fluence>100')\r\n\r\n #extracting data from csv file\r\n with open(input_file,'r') as f:\r\n reader = csv.DictReader(f, fieldnames)\r\n out = js.dumps( [ row for row in reader ] )\r\n\r\n obs_data = js.loads(out)\r\n\r\n data={}\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['observatory']) = instrument\r\n\r\n #creating data for all energy levels forecast\r\n for j in range(1,len(obs_data)):\r\n data[j-1]=obs_data[j]\r\n\r\n #recording start and end times for all events\r\n for i in range(len(data)):\r\n data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['start_time'] = data[i]['start_time'].isoformat()\r\n data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['end_time'] = data[i]['end_time'].isoformat()\r\n data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['peak_time'] = data[i]['peak_time'].isoformat()\r\n \r\n #recording observed values for all events\r\n if i > 0:\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events']).append({})\r\n\r\n event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'][i])\r\n \r\n #start and end times\r\n event['start_time']=data[i]['start_time']\r\n event['threshold'] = data[i]['flux_threshold']\r\n event['energy_min'] = float(data[i]['energy_threshold'][1:])\r\n event['energy_max'] = -1\r\n event['end_time']=data[i]['end_time']\r\n\r\n #peak values\r\n event['peak_intensity']=data[i]['intensity']\r\n event['peak_time'] = data[i]['peak_time']\r\n event['intensity_units']='pfu'\r\n \r\n #fluence values\r\n event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'},\r\n {'energy_min' : '100', 'fluence_value' : 'fluence_value',\r\n 
'units' : 'MeV [cm^-2]'}]\r\n event['fluence'][0]['fluence']=data[i]['fluence>10']\r\n event['fluence'][1]['fluence']=data[i]['fluence>100']\r\n\r\n\r\n if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index\r\n (int(event['energy_min']))]:\r\n event['all_clear_boolean'] = 'false'\r\n\r\n else:\r\n event['all_clear_boolean'] = 'true'\r\n\r\n\r\n #building json file\r\n with open(obs_path / output_file, 'w') as s:\r\n js.dump(obs_json,s,indent=1)\r\n print('json file %s created' %output_file)\r\n \r\n return", "def download_data(start_date, end_date, gr, instr, path):\n _from = f'{start_date.isoformat()}T00:00:00Z'\n _to = f'{end_date.isoformat()}T00:00:00Z'\n gran = gr\n instr_ = instr\n\n params = {\n \"granularity\": gran,\n \"from\": _from,\n \"to\": _to\n }\n \n print(f'Saving to path: {path}')\n \n with open(f'{path}/{instr}_{gran}_{start_date.isoformat()}_{end_date.isoformat()}.csv', \"w\") as O:\n for r in InstrumentsCandlesFactory(instrument=instr_, params=params):\n print(\"REQUEST: {} {} {}\".format(r, r.__class__.__name__, r.params))\n rv = client.request(r)\n cnv(r.response, O)\n \n print('Finished')", "def step010():\n logger.logMessage('Begin: Getting candidate documents from elasticsearch')\n\n def limitHour(d):\n thish = d.start_time.tz_localize(tz='UTC')\n nexth = thish + dt.timedelta(hours=1)\n return { 'range': { 'time': {'gte':thish, 'lt':nexth } } }\n \n conn = sql.create_engine(pgurl)\n client = es.Elasticsearch(hostlist)\n dupesDF = pd.read_sql_table('weather_dupes',conn).set_index('time')\n hours =dupesDF.to_period('H').reset_index()['time'].unique()\n ranges = [ limitHour(h) for h in hours ]\n query = { \n '_source': [ 'tsa','time' ],\n 'query': { \n 'bool': { 'should': ranges } \n } \n }\n #logger.logMessage(level='DEBUG',message='Query body: {0}'.format(query))\n hits = eshelp.scan(client=client,index=indexName,doc_type='doc',query=query)\n numRecs = 0\n with open(candidatesFile,'w') as f:\n for h in hits:\n src = h['_source']\n tsa = int(src['tsa'])\n time = src['time']\n docid = h['_id']\n idx = h['_index']\n f.write(f'{tsa:014d};{time:25s};{docid:32s};{idx:32s}\\n') \n numRecs += 1\n if numRecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} records written\".format(numRecs))\n logger.logMessage(message=\"{0:9d} total records written\".format(numRecs))\n logger.logMessage('End: Getting candidate documents from elasticsearch')", "def _write_to_dataset(parser1, parser2, dset, rundate):\n\n data_all1 = parser1.as_dict()\n data_all2 = parser2.as_dict()\n if parser1.file_path == parser2.file_path:\n collection = [data_all1]\n else:\n collection = [data_all1, data_all2]\n\n # Meta information\n dset.meta[\"tech\"] = \"slr\"\n dset.meta.add(\"file\", parser1.file_path.stem, section=\"input\")\n dset.meta.add(\"file\", parser2.file_path.stem, section=\"input\")\n dset.meta.add(\"type\", config.tech.obs_format.str.upper(), section=\"input\")\n\n # Make new dict \"obs_data\" containing only data in relevant time interval:\n arc_length = config.tech.arc_length.float\n rundate_datetime = datetime(rundate.year, rundate.month, rundate.day)\n obs_data = dict()\n for data_all in collection:\n for i, x in enumerate(data_all[\"meta\"][\"obs_time\"]):\n if rundate_datetime <= x < rundate_datetime + timedelta(days=arc_length):\n for key in (\"meta\", \"obs\", \"obs_str\"):\n for field, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(field, list()).append(val[i])\n\n data_all.pop(\"meta\")\n 
data_all.pop(\"obs\")\n data_all.pop(\"obs_str\")\n\n for key in data_all.keys():\n if key.startswith(\"met_\"):\n for key2, val in data_all[key].items():\n obs_data.setdefault(key, dict()).setdefault(key2, list()).append(val)\n elif key.startswith(\"satellite_\"):\n # TODO: Use this information in the future?\n continue\n elif key.startswith(\"station_\"):\n # TODO: Use this information in the future?\n continue\n else:\n log.fatal(f\"Unknown data type{key}\")\n\n obs_date = obs_data[\"meta\"][\"obs_date\"]\n time = [obs_date[i] + timedelta(seconds=obs_data[\"meta\"][\"obs_sec\"][i]) for i in range(0, len(obs_date))]\n dset.num_obs = len(obs_data[\"meta\"][\"obs_time\"])\n dset.add_time(\"time\", val=time, scale=\"utc\", fmt=\"datetime\")\n dset.add_text(val=obs_data[\"meta\"][\"station\"], name=\"station\")\n dset.add_text(val=obs_data[\"meta\"][\"satellite\"], name=\"satellite\")\n dset.add_float(val=obs_data[\"meta\"][\"bin_rms\"], unit=\"picoseconds\", name=\"bin_rms\")\n # Positions\n trf = apriori.get(\"trf\", time=dset.time)\n for station in dset.unique(\"station\"):\n trf_site = trf[station]\n station_pos = trf_site.pos.trs.val\n log.debug(f\"Station position for {station} ({trf_site.name}) is (x,y,z) = {station_pos.mean(axis=0)}\")\n domes = trf_site.meta[\"domes\"]\n obs_data[\"pos_\" + station] = station_pos\n obs_data[\"station-other_\" + station] = dict(domes=domes, cdp=station, site_id=station)\n dset.add_position(\n \"site_pos\",\n time=dset.time,\n system=\"trs\",\n val=np.array([obs_data[\"pos_\" + s][idx] for idx, s in enumerate(dset.station)]),\n )\n # Station data\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station_\")])\n for field in sta_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"station_\" + s][field]) for s in dset.station]))\n sta_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n for field in sta_fields:\n dset.add_text(field, val=[obs_data[\"station-other_\" + s][field] for s in dset.station])\n\n # Station meta\n station_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"station-other_\")])\n pos_keys = sorted([k for k, v in obs_data.items() if k.startswith(\"pos_\")])\n\n for sta_key, pos_key in zip(station_keys, pos_keys):\n sta_name = sta_key.replace(\"station-other_\", \"\")\n cdp = obs_data[sta_key][\"cdp\"]\n dset.meta.add(sta_name, \"site_id\", cdp)\n longitude, latitude, height, _ = sofa.iau_gc2gd(2, obs_data[pos_key][0, :]) # TODO: Reference ellipsoid\n dset.meta[\"station\"].setdefault(sta_name, {})[\"cdp\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"site_id\"] = cdp\n dset.meta[\"station\"].setdefault(sta_name, {})[\"domes\"] = obs_data[sta_key][\"domes\"]\n dset.meta[\"station\"].setdefault(sta_name, {})[\"marker\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"description\"] = \" \"\n dset.meta[\"station\"].setdefault(sta_name, {})[\"longitude\"] = longitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"latitude\"] = latitude\n dset.meta[\"station\"].setdefault(sta_name, {})[\"height\"] = height\n\n # Satellite data\n sat_fields = set().union(*[v.keys() for k, v in obs_data.items() if k.startswith(\"satellite_\")])\n for field in sat_fields:\n dset.add_float(field, val=np.array([float(obs_data[\"satellite_\" + s][field]) for s in dset.satellite]))\n\n # Observations\n # In the dataset, obs_time is seconds since rundate:\n v = [\n (obs_data[\"meta\"][\"obs_date\"][i] - 
rundate_datetime).total_seconds() + obs_data[\"meta\"][\"obs_sec\"][i]\n for i in range(0, dset.num_obs)\n ]\n\n obs_data[\"obs\"].pop(\"obs_time\")\n dset.add_float(\"obs_time\", val=v)\n for field, values in obs_data[\"obs\"].items():\n dset.add_float(field, val=np.array(values))\n\n for field, values in obs_data[\"obs_str\"].items():\n dset.add_text(field, val=values)\n\n return obs_data", "def read_json_to_df(start_dt, end_dt, cache):\r\n # Establish files daterange\r\n collect_sdt = start_dt\r\n if (end_dt.hour == 0) & (end_dt.minute == 0):\r\n collect_edt = end_dt\r\n else:\r\n collect_edt = end_dt + timedelta(days=1)\r\n\r\n date_str_list = [datetime.strftime(i,\"%Y-%m-%d\") for i in pd.date_range(collect_sdt, collect_edt)]\r\n\r\n # Find only a subset of JSON files in the given daterange\r\n JSON_subset = [glob.glob(os.path.join(cache, \"NEMED_PS_DAILY_{}*.json\".format(i))) for i in date_str_list]\r\n JSON_subset = [item for sublist in JSON_subset for item in sublist]\r\n\r\n print(\"Reading selected {} JSON files to pandas, of cached files\".format(len(JSON_subset)))\r\n logger.info(\"Loading Cached Price Setter Files...\")\r\n\r\n all_df = []\r\n for file in tqdm(JSON_subset):\r\n with open(file, 'r') as f:\r\n data = json.loads(f.read())\r\n df_nested_list = pd.json_normalize(data)\r\n df_nested_list['@PeriodID'] = pd.to_datetime(df_nested_list['@PeriodID'], format=\"%Y-%m-%d %H:%M:%S\").dt.tz_localize(None)\r\n df_nested_list = df_nested_list[(df_nested_list['@Market'] == 'Energy') &\r\n (df_nested_list['@DispatchedMarket'] == 'ENOF')]\r\n all_df += [df_nested_list]\r\n\r\n all_df = pd.concat(all_df)\r\n all_df.columns = all_df.columns.str.strip('@')\r\n all_df.drop(['Market','DispatchedMarket'], axis=1, inplace=True)\r\n all_df = all_df.astype({'RegionID': str, 'Price': float, 'Unit': str, 'BandNo': int, \\\r\n 'Increase': float, 'RRNBandPrice': float, 'BandCost': float})\r\n all_df = all_df[all_df['PeriodID'].between(start_dt, end_dt, inclusive=\"right\")].sort_values(['PeriodID','RegionID'])\r\n return all_df.reset_index(drop=True)", "def dates_to_fits(date_begin: astropy.time.Time, date_end: astropy.time.Time) -> dict[str, Any]:\n cards: dict[str, Any] = {}\n if date_begin is None and date_end is None:\n # no date headers can be written\n return cards\n\n cards[\"TIMESYS\"] = \"TAI\"\n\n date_avg = None\n if date_begin is not None and date_end is not None:\n date_avg = date_begin + (date_end - date_begin) / 2.0\n\n for fragment, date in ((\"OBS\", date_begin), (\"BEG\", date_begin), (\"END\", date_end), (\"AVG\", date_avg)):\n if date is not None:\n tai = date.tai\n cards[f\"DATE-{fragment}\"] = tai.isot\n cards[f\"MJD-{fragment}\"] = tai.mjd\n\n return cards", "def main():\n # Pull variables from pf\n profileref = pfget('google_mapfeed.pf', profile)\n dbname = profileref['dbname']\n path = profileref['webbase']\n finalfile = '%s/%s' % (path, profileref['file'])\n bufferfile = '%s+' % finalfile\n max_nquakes = 600\n element_fields = ['lat', 'lon', 'depth', 'time', 'local_timestring', 'utc_timestring', 'magnitude', 'auth']\n\n if verbose:\n print \"Start: Creating main JSON file '%s' for all stations at %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n now = time.time()\n # Set time zone\n os.putenv('TZ','US/Pacific')\n time.tzset()\n if verbose:\n print \"The time zone is: %s\" % (time.tzname)[0]\n print \"The current time is: %s\" % now\n\n # Override defaults\n if override_number:\n if verbose:\n print \"Overriding default number of 
events (%d) with %d\" % (max_nquakes, override_number)\n nquakes = override_number\n else:\n nquakes = max_nquakes\n if override_timerange:\n if verbose:\n print \"Overiding default number of events (%d) with time range %d seconds\" % (max_nquakes, override_timerange)\n nquakes = False\n\n # Database processing\n if verbose:\n print \"Opening database\";\n print \"Number of events requested: %s\" % nquakes\n db = dbopen(dbname, 'r')\n\n '''\n Occasionally there is more than one magnitude for a single orid\n (such as provided by QED). We need the most recent magnitude for\n a given orid, so sort on orid and lddate, then group on orid,\n then get the most recent record number (greatest lddate) for each\n group. Add that to a dictionary we will use later.\n '''\n netmag_dict = {}\n db_netmag = dblookup(db, table='netmag')\n db_netmag.sort(['orid', 'lddate'])\n db_netmag_grp = dbgroup(db_netmag, 'orid')\n if verbose:\n print \"There are %s records\" % db_netmag_grp.query('dbRECORD_COUNT')\n for i in range(db_netmag_grp.query('dbRECORD_COUNT')):\n db_netmag_grp[3] = i\n orid, [dbptr, view, end_record, start_record] = db_netmag_grp.getv('orid', 'bundle')\n if verbose:\n print \"\\t- Iteration: %s: Orid: %s, Start record: %s, End record: %s\"% (i, orid, start_record, end_record)\n db_netmag[3] = end_record - 1\n if verbose:\n print \"\\t\\t- Magnitude: %s, Magtype: %s\" % (db_netmag.getv('magnitude')[0], db_netmag.getv('magtype')[0] )\n magnitude, magtype = db_netmag.getv('magnitude', 'magtype')\n netmag_dict[orid] = { 'rec':end_record, 'magnitude':magnitude, 'magtype':magtype }\n\n '''\n if verbose:\n for key in sorted(netmag_dict.iterkeys()):\n print \"%s: %s\" % (key, netmag_dict[key])\n '''\n\n '''\n Now get the event information\n '''\n db.lookup(table='origin')\n db.join('event')\n if verbose:\n print \"Number of joined records of event and origin tables: %s\" % db.query('dbRECORD_COUNT')\n if override_timerange:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - 'time >= %s'\" % (override_timerange, override_oldest)\n db.subset('time >= %d' % override_oldest)\n if verbose:\n print \"Subset on time. 
Number of records: %s\" % db.query('dbRECORD_COUNT')\n # Join views\n # db_joined = dbjoin(db, db_netmag)\n\n if verbose:\n print \"Subset orid == prefor\"\n db.subset('orid == prefor')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n print \"Subset for time != NULL\"\n db.subset('time != NULL')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n # We want the most recent first for the comparison with nquakes\n db.sort(['time'], reverse=True)\n if verbose:\n print \"Number of sorted records: %s\" % db.query('dbRECORD_COUNT')\n if nquakes:\n if db.query('dbRECORD_COUNT') > nquakes:\n db[3] = nquakes - 1\n min_time = db.getv('time')[0]\n db.subset(\"time >= %s\" % min_time)\n else:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - time > %s\" % (override_timerange, override_oldest)\n db.subset(\"time >= %s\" % override_oldest)\n # Sort in normal time - we want the most recent events plotted on top\n db.sort(('time'))\n if verbose:\n print \"Number of records without subset on time: %s\" % db.query('dbRECORD_COUNT')\n '''\n Build event dictionary\n '''\n event_dict = {'metadata':{},'events':{}}\n\n '''\n Build metadata dictionary\n '''\n if nquakes:\n event_dict['metadata']['max_nquakes'] = nquakes\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(min_time), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(min_time)\n event_dict['metadata']['type'] = 'event_limited'\n elif override_oldest:\n event_dict['metadata']['time_range'] = int(override_timerange)\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(override_oldest), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(override_oldest)\n event_dict['metadata']['type'] = 'time_limited'\n event_dict['metadata']['modification_time'] = int(time.time())\n event_dict['metadata']['modification_time_readable'] = epoch2str( int(time.time()), \"%H:%M UTC %A %B %o, %Y\" )\n\n '''\n Build event dictionary\n '''\n events = {}\n for i in range(db.query('dbRECORD_COUNT')):\n db[3] = i\n if verbose:\n epoch_time, orid = db.getv('time', 'orid')\n print \"\\tRecord number is: %s Orid is: %d Time is: %s\" % (db[3], orid, epoch2str(epoch_time, '%Y-%m-%d %H:%M:%S'))\n\n orid = db.getv('orid')[0]\n\n if orid in netmag_dict:\n events[i] = {}\n for ef in element_fields:\n # Parse values\n if ef is 'local_timestring' or ef is 'utc_timestring' or ef is 'time':\n value = dbgetv(db, 'time')[0]\n difference = float(now) - float(value)\n if difference < 6 * 3600:\n color = 'red'\n elif difference < 12 * 3600:\n color = 'orange'\n elif difference < 24 * 3600:\n color = 'yellow'\n elif difference < 72 * 3600:\n color = 'chartreuse'\n elif difference < 168 * 3600:\n color = 'blue'\n else:\n color = 'grey'\n events[i]['color'] = color\n elif ef is 'depth':\n value = dbgetv(db, 'depth')[0]\n elif ef is 'auth':\n value = dbgetv(db, 'auth')[0]\n elif ef is 'magnitude':\n # Magnitude\n # mlval, mbval, msval, magnitudeval, magtypeval = db.getv('ml', 'mb', 'ms', 'magnitude', 'magtype')\n # Null magnitude is -999.00\n magnitudeval = netmag_dict[orid]['magnitude']\n magtypeval = netmag_dict[orid]['magtype']\n if int(magnitudeval) > 0:\n scale = magtypeval\n value = '%.1f' % magnitudeval\n else:\n scale = ''\n value = 'N/A'\n events[i]['scale'] = scale\n else:\n value = dbgetv(db, ef)\n\n # Override formatting for specific fields\n if ef is 'lat' or 
ef is 'lon':\n value = '%.4f' % value\n elif ef is 'local_timestring':\n value = epoch2str( value, \"%H:%M:%S %Z %A %B %o, %Y\", \"US/Pacific\" )\n elif ef is 'utc_timestring':\n value = epoch2str( value, \"%H:%M:%S UTC %A %B %o, %Y\" )\n events[i][ef] = value\n\n full_lat, full_lon = db.getv('lat', 'lon')\n events[i]['grname'] = (grname(full_lat,full_lon)).title()\n events[i]['srname'] = (srname(full_lat,full_lon)).title()\n\n event_dict['events'] = events\n\n # Dump JSON file\n f = open(bufferfile, 'w') \n json.dump(event_dict, f, sort_keys=True, indent=2)\n f.flush()\n\n # Move the file to replace the older one\n try:\n os.rename(bufferfile, finalfile)\n except OSError:\n print \"Cannot rename JSON file from %s to %s\" % (bufferfile,finalfile)\n\n if verbose:\n print \"End: Creating main JSON file '%s' for all stations %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n db.close()\n return 0", "def asset_prices_load_history(asset_ticker=None,\n start_date=None,\n end_date=None,\n full=False,\n verbose=False):\n from pymongo import MongoClient\n import json\n import os\n import re\n accessdb = \"mongodb://arteech-dev:cgKpbDj0YvfzaumUtem03P2GYQgPUVSCAYImvRsLbnTN01c9ZgOziCxbvDsFcyCY81J2WhFUVmc3JIOmxT9pJw==@arteech-dev.mongo.cosmos.azure.com:10255/?ssl=true&replicaSet=globaldb&retrywrites=false&maxIdleTimeMS=120000&appName=@arteech-dev@\"\n # Life table\n\n # accessdb = \"mongodb+srv://sngoube:Came1roun*@cluster0.jaxrk.mongodb.net/asset_analytics?retryWrites=true&w=majority\"\n\n companies = ['IBM', 'AAPL', 'MSFT'] # IBM, Apple, Microsoft\n\n # ****Connect to MongoDB Atlas****\n # accessdb = 'mongodb+srv://{}:{}@{}'.format(username, psswd, link)\n try:\n server = MongoClient(accessdb)\n except:\n print('log Could not connect to MongoDB :(')\n\n for company_ticker in companies:\n\n list_data = get_historical_data(asset_ticker=company_ticker,\n start_date=start_date,\n end_date=end_date,\n verbose=verbose)\n if full is True:\n if company_ticker in server['prices'].list_collection_names():\n server['prices'][company_ticker].drop()\n server['prices'][company_ticker].insert_many(list_data)\n print(\"Disconnected!\")\n server.close()\n return\n # last_inserted = server['prices'][company_ticker].find({'Date': df_data.iloc[0][1]})\n\n # if last_inserted.retrieved > 0:\n # pass\n\n # if start_date is not None and start_date == end_date:\n\n # server['prices'][company_ticker].insert_one(\n # {\"Company\": company_ticker, \"Date\": df_data.iloc[0][0].strftime(\"%Y-%m-%d\"), \"Open\": df_data.iloc[0][1],\n # \"High\": df_data.iloc[0][2], \"Low\": df_data.iloc[0][3], \"Close\": df_data.iloc[0][4],\n # \"Volume\": df_data.iloc[0][5]})\n\n print(\"Data inserted\")", "def make_phys():\n for rn in dcm_dict.keys():\n # PPG\n if not dcm_dict[rn]['ppg_file'] == 'File missing':\n # Files\n ppg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ppg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['ppg_file'],ppg_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 100.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(ppg_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # Respiration\n if not dcm_dict[rn]['resp_file'] == 'File missing':\n # Files\n resp_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.tsv.gz')\n resp_json = 
os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 25.0\n data['StartTime'] = -30.0\n data['Columns'] = 'respiratory'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # ECG\n # What to do if they have PPG and ECG?\n if not dcm_dict[rn]['ecg_file'] == 'File missing':\n # Files\n ecg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ecg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 1000.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)", "def write_to_json(data):\n # Serializing json \n json_object = json.dumps(data, indent = 4) \n\n date = datetime.now()\n date_str = date.strftime(\"%y%m%d_%H%M\")\n \n # Writing to sample.json \n with open(\"./intraday_data/json/i_data_{0}_{1}.json\".format(date_str, int(time.time())), \"w\") as outfile: \n outfile.write(json_object)", "def search_files_keyword_specific(keywrd_list, source_dir_path,output_file, start_dt_obj, end_dt_obj):\n\tfile_obj = open(output_file,'a')\n\tfor folder_path in [x[0] for x in os.walk(source_dir_path)]:\n\t\tfor keyword in keywrd_list:\n\t\t\tfile_list = glob.glob(os.path.join(folder_path,keyword))\n\t\t\tfor file_path in file_list:\n\t\t\t\tfile_date = datetime.fromtimestamp(os.path.getctime(file_path))\n\t\t\t\t#Date compare\n\t\t\t\tif file_date >= start_dt_obj and file_date <= end_dt_obj:\n\t\t\t\t\tfile_obj.write(file_path+'\\n')\n\t\t\t\t\tprint 'Limit :',file_date, file_path\n\t\t\t\telse:\n\t\t\t\t\tprint 'Beyond:',file_date,file_path\n\tfile_obj.close()", "def generateTheWorkspace(self):\n \"\"\"\n ########################################################################################\n This line of code will obtain the name of the Workspace.////////////////////////////////\n ########################################################################################\n \"\"\"\n theWorkspaceName = self.readThe['WorkspaceName'].get_value(0)\n\n \"\"\"\n ########################################################################################\n This block of code will generate a custom iso formatted date./////////////////////////// \n ########################################################################################\n \"\"\"\n\n theCreatedDay = datetime.today().day\n theCreatedCurrentDay = int(theCreatedDay)\n theCreatedMonth = datetime.today().month\n theCreatedCurrentMonth = int(theCreatedMonth)\n theCreatedYear = datetime.today().year\n theCreatedCurrentYear = int(theCreatedYear)\n theCreatedHour = datetime.today().hour\n theCreatedCurrentHour = int(theCreatedHour)\n theCreatedMinute = datetime.today().minute\n theCreatedCurrentMinute = int(theCreatedMinute)\n theCreatedSecond = datetime.today().second\n theCreatedCurrentSecond = int(theCreatedSecond)\n theCreatedMicrosecond = datetime.today().microsecond\n theCreatedCurrentMicrosecond = int(theCreatedMicrosecond)\n theCurrentCreatedDate = datetime(theCreatedCurrentYear, theCreatedCurrentMonth, theCreatedCurrentDay,\n theCreatedCurrentHour, theCreatedCurrentMinute, theCreatedCurrentSecond,\n theCreatedCurrentMicrosecond)\n theCreatedDate = theUpdatedDate = 
theCurrentCreatedDate.isoformat() + 'Z'\n\n theIntentColumn = self.readThe['Intents']\n theIntentsArray = []\n theCounter = 0\n for each in theIntentColumn:\n theIntentExamplesArray = []\n theIntentName = self.readThe['Entity'].get(theCounter)\n\n example1 = {\n \"text\": \"¿\" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theCreatedDate\n }\n\n example2 = {\n \"text\": \"\" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n example3 = {\n \"text\": \"¿Qué es un \" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n example4 = {\n \"text\": \"¿Que es un \" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n example5 = {\n \"text\": \"Qué es un \" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n example6 = {\n \"text\": \"Que es un \" + theIntentName + \"?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n\n example7 = {\n \"text\": theIntentName + \", ¿qué es?\",\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n\n theIntentExamplesArray.append(example1)\n theIntentExamplesArray.append(example2)\n theIntentExamplesArray.append(example3)\n theIntentExamplesArray.append(example4)\n theIntentExamplesArray.append(example5)\n theIntentExamplesArray.append(example6)\n theIntentExamplesArray.append(example7)\n\n theClientExamples = self.readThe['Examples']\n if theClientExamples.count() > 0:\n theCustomExamples = theClientExamples.get_value(theCounter)\n each_custom_intent = str(theCustomExamples)\n if not each_custom_intent == \"nan\":\n theQuestionsArray = each_custom_intent.split(\";\")\n for each_example in theQuestionsArray:\n theCustomExampleIntent = {\n \"text\": each_example,\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate\n }\n theIntentExamplesArray.append(theCustomExampleIntent)\n else:\n print(\"There are NO client custom examples for this intent {}.\".format(theIntentName))\n else:\n print(\"Well, there are some that have, others don't.\")\n theIntents = {\n \"intent\": each,\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate,\n \"examples\": theIntentExamplesArray,\n \"description\": None\n }\n\n theIntentsArray.append(theIntents)\n theCounter += 1\n\n theEntityColumn = self.readThe['Entity']\n theEntitiesArray = []\n\n for each in theEntityColumn:\n theValuesArray = []\n each = str(each)\n theValues = {\n \"type\": \"synonyms\",\n \"value\": each,\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate,\n \"metadata\": None,\n \"synonyms\": getSynonyms(each)\n }\n theValuesArray.append(theValues)\n\n theEntities = {\n \"entity\": each,\n \"values\": theValuesArray,\n \"created\": theCreatedDate,\n \"updated\": theUpdatedDate,\n \"metadata\": None,\n \"description\": None\n }\n theEntitiesArray.append(theEntities)\n\n theLanguage = self.readThe['Language'].get_value(0)\n\n theFormattedYear = datetime.today().year\n theYearAsNumber = str(theFormattedYear)\n theFormattedMonth = datetime.today().month\n theMonthAsNumber = str(theFormattedMonth)\n theFormattedDay = datetime.today().day\n theDayAsNumber = str(theFormattedDay)\n theCreatedDateFormatted = \"{}-{}-{}\".format(theYearAsNumber, theMonthAsNumber, theDayAsNumber)\n\n theMetaDataMajorVersion = 'v1'\n theMetaDataMinorVersion = theCreatedDateFormatted\n theWorkspaceMetaDataAPI_VERSION = {\n \"major_version\": theMetaDataMajorVersion,\n \"minor_version\": theMetaDataMinorVersion\n }\n 
theWorkspaceMetaData = {\n \"api_version\": theWorkspaceMetaDataAPI_VERSION\n }\n\n theWorkspaceDescription = self.readThe['Description'].get_value(0)\n\n theDialogNodesArray = []\n\n theWorkspaceID = '1234'\n\n theWorkspaceCounterExamples = None\n\n theWorkspaceLearningOptOut = False\n\n theFinalWorkspace = {\n \"name\": theWorkspaceName,\n \"created\": theCreatedDate,\n \"intents\": theIntentsArray,\n \"updated\": theUpdatedDate,\n \"entities\": theEntitiesArray,\n \"language\": theLanguage,\n \"metadata\": theWorkspaceMetaData,\n \"description\": theWorkspaceDescription,\n \"dialog_nodes\": theDialogNodesArray,\n \"workspace_id\": theWorkspaceID,\n \"counterexamples\": theWorkspaceCounterExamples,\n \"learning_opt_out\": theWorkspaceLearningOptOut\n }\n\n return str(dict(theFinalWorkspace))", "def get_ride_report(startDate, endDate):\n\n results_list = []\n\n session = DB_SESSION()\n\n results = []\n\n results = session.query(Report).filter(Report.date_created>=startDate, Report.date_created<=endDate)\n\n for result in results:\n results_list.append(result.to_dict())\n print(result.to_dict())\n\n session.close()\n\n return results_list, 200", "def retrieve_citibike_data(start_year=2013, end_year=2021, target=\"data/citibike_trips_nyc/\"):\n\n for year in range(start_year, end_year):\n for month in range(1, 13):\n\n date_format = str(year) + '{:02d}'.format(month)\n print(date_format)\n # retrieve data from citibike's s3 buckets and store in zip directory\n # weird change in zip naming convention before 2017\n if year < 2017:\n urllib.request.urlretrieve(\"https://s3.amazonaws.com/tripdata/\" + date_format +\n \"-citibike-tripdata.zip\", target + date_format + \"-citibike-tripdata.zip\")\n else:\n urllib.request.urlretrieve(\"https://s3.amazonaws.com/tripdata/\" + date_format +\n \"-citibike-tripdata.csv.zip\", target + date_format + \"-citibike-tripdata.zip\")\n print(str(year) + \"-\" + str(month) + \" done\")", "def make_df_from_json(json_files, out_file):\n table = [[\"name\", \n \"cik\", \n \"city\",\n \"state\",\n \"street1\",\n \"street2\",\n \"zip_code\",\n \"year_of_incorp\", \n \"min_inv\", \n \"tot_off\", \n \"tot_sold\", \n \"tot_rem\", \n \"ind_group_type\", \n \"has_non_accred\", \n \"num_non_accred\", \n \"tot_num_inv\"\n ]] \n\n for json_dict in json_files:\n\n with open(json_dict, \"rb\") as f:\n data = json.load(f)\n print(json_dict)\n\n for i, key in enumerate(data):\n # if i % 1000 == 0:\n # print(i)\n entry = data[key] \n if entry == {}:\n #print(\"missing entry {0}\".format(i))\n continue\n row = []\n\n primary_issuer = entry[\"Primary Issuer\"]\n cik = primary_issuer[\"cik\"]\n name = primary_issuer[\"entity_name\"]\n phone = primary_issuer[\"phone\"]\n year_of_incorp = primary_issuer[\"year_of_incorp\"]\n address = primary_issuer[\"address\"]\n city = address[\"city\"]\n state = address[\"state\"]\n street1 = address[\"street1\"]\n street2 = address[\"street2\"]\n zip_code = address[\"zip_code\"]\n\n secondary_issuers = entry[\"Secondary Issuers\"]\n related_people = entry[\"Related People\"]\n \n offering_data = entry[\"Offering Data\"]\n min_inv = offering_data[\"min_investment_accepted\"]\n tot_off = offering_data[\"total_offering_amount\"]\n tot_sold = offering_data[\"total_amount_sold\"]\n tot_rem = offering_data[\"total_remaining\"]\n ind_group_type = offering_data[\"ind_group_type\"]\n has_non_accred = offering_data[\"has_non_accred\"]\n num_non_accred = offering_data[\"num_non_accred\"]\n tot_num_inv = offering_data[\"tot_num_inv\"] \n\n row = [name, \n 
cik, \n city,\n state,\n street1,\n street2,\n zip_code,\n year_of_incorp,\n min_inv,\n tot_off,\n tot_sold,\n tot_rem,\n ind_group_type,\n has_non_accred,\n num_non_accred,\n tot_num_inv\n ]\n\n table.append(row)\n\n df = pd.DataFrame(table)\n df.to_csv(out_file)\n\n return 0", "def get_date_range():\n start_date = request.args.get(\"start\", default=None, type=str)\n start_date = datetime.datetime.fromisoformat(start_date)\n end_date = request.args.get(\"end\", default=None, type=str)\n end_date = datetime.datetime.fromisoformat(end_date)\n\n animals = []\n for key in rd.keys(\"*\"):\n animal = json.loads(rd.get(key))\n if (\n start_date\n <= datetime.datetime.fromisoformat(animal[\"created-on\"])\n <= end_date\n ):\n animals.append(animal)\n\n return jsonify(animals)", "def buildSFOUrls(jd_start, jd_stop):\n url_list = []\n time_list = ['03z', '09z', '15z', '21z']\n delta = jd_stop-jd_start\n for i in range((delta.days)+1):\n model_file_date = jd_start + timedelta(days=i)\n base_url = ('http://opendap.co-ops.nos.noaa.gov/'\n 'thredds/dodsC/NOAA/SFBOFS/MODELS/')\n val_month, val_year, val_day = '', '', ''\n # Month.\n if model_file_date.month < 10:\n val_month = \"0\" + str(model_file_date.month)\n else:\n val_month = str(model_file_date.month)\n # Year.\n val_year = str(model_file_date.year)\n # Day.\n if model_file_date.day < 10:\n val_day = \"0\" + str(model_file_date.day)\n else:\n val_day = str(model_file_date.day)\n file_name = '/nos.sfbofs.stations.nowcast.'\n file_name += val_year + val_month + val_day\n for t in time_list:\n t_val = '.t' + t + '.nc'\n url_list.append(base_url + val_year + val_month +\n file_name + t_val)\n return url_list", "def pullGateCountDateRange(start_date, end_date):\n start_date = parser.parse(start_date)\n end_date = parser.parse(end_date) + timedelta(days=1)\n dates = []\n for single_date in daterange(start_date, end_date):\n dates.append(single_date.strftime(\"%Y-%m-%d\"))\n for i in range(len(dates)):\n req = pullGateCount(dates[i], dates[i+1])\n data = req.json()\n if req.status_code >= 400:\n print(\"Error1:\", dates[i], json.dumps(data, indent=0))\n else:\n # Load data\n for itm in data[\"results\"]:\n tmpTZD = {}\n localDT = parser.parse(itm[\"recordDate_hour_1\"]).replace(\n tzinfo=pytz.utc).astimezone(local_tz)\n tmpTZD['local_timestamp'] = localDT.isoformat()\n tmpTZD['year'] = localDT.year\n tmpTZD['month'] = localDT.month\n tmpTZD['day'] = localDT.day\n tmpTZD['hour'] = localDT.hour\n tmpTZD['minute'] = localDT.minute\n tmpTZD['second'] = localDT.second\n tmpTZD['time_zone_name'] = localDT.tzname()\n tmp = itm\n tmp['localDateTime'] = tmpTZD\n saveCybercomData(tmp)\n # print(dates[i])\n if dates[i+1] == dates[-1]:\n break\n return \"Date(s) Imported/Updated: {0}\".format(\",\".join(dates[:-1]))", "def DTOBS(start):\n session = Session(engine)\n # Query all passengers\n \n DTBOS = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).group_by(Measurement.date).all() \n\n # Convert list of tuples into normal list\n all_DTOBS = list(np.ravel(DTBOS))\n\n return jsonify(all_DTOBS)", "def report_start_end(request):\n\n report = request.GET.get('report_id')\n lang = request.GET.get('language',None)\n usecase = request.session['usecase']\n data = get_fields_from_json()\n json_keys_to_display = data['fields']\n json_keys_to_display.extend(['journal','authors','year','volume'])\n json_keys_to_ann = data['fields_to_ann']\n json_keys = 
(data['all_fields'])\n\n language = request.GET.get('language',request.session['language'])\n request_auto = request.GET.get('ns_id',None)\n if request.session['mode'] == 'Robot' or (request_auto is not None and request_auto == 'Robot' and request.session['institute'] != 'PUBMED'):\n # In this case we require automatic annotation: the keys to annotate change\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n with open(os.path.join(workpath,'./automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_keys = data['total_fields'][usecase]\n json_keys_to_ann = data['extract_fields'][usecase]\n for el in json_keys_to_ann:\n if el in json_keys_to_display:\n json_keys_to_display.remove(el)\n\n json_keys.extend(['journal', 'authors', 'year', 'volume', 'abstract', 'title'])\n json_keys_to_ann.extend(['abstract', 'title'])\n if lang is not None:\n language = lang\n json_dict = report_get_start_end(json_keys,json_keys_to_ann,report,language)\n # print(json_dict)\n return JsonResponse(json_dict)", "def process_generators(pid, generators, data_source, out_loc, start_date, end_date, debug=False):\n print(pid)\n #query = client.query_data_points(page_size=PAGE_SIZE, source=pid)\n query = try_query(pid)\n for gen_id in generators:\n print(gen_id)\n if gen_id == 'pdk-location':\n process_location(pid, data_source, out_loc, start_date, end_date)\n continue\n\n \"\"\" ema_query = query.filter(source=pid, \n generator_identifier=gen_id,\n created__gte=start_date,\n created__lte=end_date).order_by('created') \"\"\"\n ema_query = try_filter(query, pid, gen_id, start_date, end_date)\n tot_count = ema_query.count()\n count = 0\n frac = int(tot_count / 100)\n ema_df = pd.DataFrame()\n for point in ema_query:\n point_df = json_normalize(point) \n point_df.columns = point_df.columns.str.replace(\"passive-data-metadata.\", \"\", regex=False)\n \n \"\"\"\n point_df = pd.DataFrame.from_dict(point).iloc[0].to_frame().transpose()\n metadata_df = pd.Series(point['passive-data-metadata']).to_frame().transpose()\n\n point_df.reset_index(inplace=True, drop=True)\n point_df = pd.concat([metadata_df, point_df], axis=1, sort=True)\n \n point_df.drop('passive-data-metadata', axis='columns', inplace=True)\n \"\"\"\n\n ema_df = ema_df.append(point_df)\n count += 1\n if debug and (count % frac == 0):\n print(\"{0:.2f}% complete\".format(float(count)/float(tot_count)*100))\n\n ema_df['pid'] = pid \n ema_df['data_source'] = data_source\n ema_df = ema_df.reset_index(drop=True)\n print(ema_df.shape)\n #display(ema_df.head())\n pickle.dump(ema_df, open(\"{}/{}/{}.df\".format(out_loc, gen_id, pid), 'wb'), -1)", "def run(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n for entry in glob.glob(os.path.join(self.data_folder, self.data_expression)):\n f = open(entry)\n text = json.loads(f.read())\n f.close()\n self.create_page_objects(text)", "def create_csv(start: object, end: object):\n data_frame = web.DataReader(\"^DJI\", \"yahoo\", start, end)\n data_frame.to_csv(\"dow.csv\")", "def from_start_date_to_end_date(start, end):\n\n first_canonicalized = start.replace(\" \", \"\").lower()\n second_canonicalized = end.replace(\" \", \"\").lower()\n first_search_date = start.replace(\" \", \"\").lower()\n second_search_date = end.replace(\" \", \"\").lower() \n all_dates_between_start_date_and_end_date = [multiple_dates for multiple_dates in temperature_parameters_list if multiple_dates[\"date\"\n ] >= first_search_date and 
multiple_dates[\"date\"] <= second_search_date]\n \n if first_search_date == first_canonicalized and second_search_date == second_canonicalized:\n return jsonify(all_dates_between_start_date_and_end_date)\n\n return jsonify({\"error\": f\"{start} and {end} not found.\"}), 404", "def date(start=None, end=None):\n \n session = Session(engine)\n \n sel = [func.min(Measurement.tobs), \n func.avg(Measurement.tobs),\n func.max(Measurement.tobs)]\n\n # start\n if end == None:\n start_session = (session.query(*sel)\n .filter(Measurement.date >= start)\n .all())\n\n session.close()\n\n start_only = []\n for min, avg, max in start_session:\n start_dict = {}\n start_dict[\"min\"] = min\n start_dict[\"avg\"] = avg\n start_dict[\"max\"] = max\n start_only.append(start_dict)\n\n return jsonify(start_only)\n \n #start end\n else:\n start_end_session = (session.query(*sel)\n .filter(Measurement.date >= start)\n .filter(Measurement.date <= end)\n .all())\n\n session.close()\n\n start_end = []\n for min, avg, max in start_end_session:\n start_end_dict = {}\n start_end_dict[\"min\"] = min\n start_end_dict[\"avg\"] = avg\n start_end_dict[\"max\"] = max\n start_end.append(start_end_dict)\n\n return jsonify(start_end)", "def retrieve_citibike_jc_data(start_year_JC=2015, end_year_JC=2021, target=\"data/citibike_trips_JC/\"):\n\n for year in range(start_year_JC, end_year_JC):\n for month in range(1, 13):\n date_format = str(year) + '{:02d}'.format(month)\n\n # retrieve data from citibike's s3 buckets and store in zip directory\n # note: JC-201708 is missing a dash\n if year == 2017 and month == 8:\n urllib.request.urlretrieve(\n \"https://s3.amazonaws.com/tripdata/JC-\" + date_format + \" citibike-tripdata.csv.zip\" + \".csv.zip\", \n target + date_format + \"-citibike-tripdata.zip\")\n else:\n urllib.request.urlretrieve(\n \"https://s3.amazonaws.com/tripdata/JC-\" + date_format + \"-citibike-tripdata.csv.zip\" + \".csv.zip\",\n target + date_format + \"-citibike-tripdata.zip\")\n print(str(year) + \"-\" + str(month) + \" done\")", "def main(input_filename, json_output):\n num_parsed = 0\n num_total = 0\n by_decade = defaultdict(int)\n for row in generators.read_ndjson_file(input_filename):\n ds = row.get('date')\n if not ds:\n continue\n num_total += 1\n d = get_parsed_date(row)\n if not d:\n LOG.debug('Unable to parse date for %10s: %s' % (row['uniqueID'], ds))\n else:\n num_parsed += 1\n year = int(d[0] or d[1]) # ignore the range for now.\n by_decade[year // 10] += 1\n\n LOG.info('Parsed %d/%d dates (%.2f%%)' % (\n num_parsed, num_total, 100.0 * num_parsed / num_total))\n counts_by_decade = [\n {\n 'decade': '%d0' % decade,\n 'count': by_decade[decade]\n } for decade in sorted(by_decade.keys())\n ]\n write_as_json_to_file(counts_by_decade, json_output)", "def date(start):\n \"\"\"for all dates greater than and equal to the start date.\"\"\"\n # Create a link to the session\n session = Session(engine)\n \n # Get the start and end date of the data\n final_date = session.query(Measurements.date).order_by(Measurements.date.desc()).first()[0]\n first_date = session.query(Measurements.date).order_by(Measurements.date.asc()).first()[0]\n \n # Make sure date is in range of the available data\n if (start > final_date) or (start < first_date):\n return f\"{start} is not a proper date.</br>Try dates between {first_date} - {final_date}\"\n\n # Query the min, avg, and max temps for the given timeframe\n results = []\n while start <= final_date:\n min_temp = 
session.query(func.min(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n avg_temp = session.query(func.avg(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n max_temp = session.query(func.max(Measurements.tobs)).filter(Measurements.date==start).first()[0]\n \n # Store the information retrieved\n results.append([start, min_temp, avg_temp, max_temp])\n \n # Update the date to check the next record\n date1 = start.split(\"-\")\n date1 = dt.date(int(date1[0]), int(date1[1]), int(date1[2])) + dt.timedelta(days=1)\n start = date1.strftime(\"%Y-%m-%d\")\n\n session.close()\n\n # Create a dictionary from the query results\n date_temps = []\n for date, min_temp, avg_temp, max_temp in results:\n date_temps_dict = {}\n date_temps_dict[\"date\"] = date\n date_temps_dict[\"min_temp\"] = min_temp\n date_temps_dict[\"avg_temp\"] = round(avg_temp, 2)\n date_temps_dict[\"max_temp\"] = max_temp\n date_temps.append(date_temps_dict)\n \n return jsonify(date_temps)", "def tobs ():\n \n #query param for start and end date\n start = request.args.get('start')\n end = request.args.get('end')\n \n query_param = \".filter(date >= \" + start + \")\"\n if end != null:\n query_param = query_param + \".filter(date <= \" + end + \")\"\n\n results=[] \n if end != null:\n results = session.query(func.min(Measurement.tobs), \\\n func.avg(Measurement.tobs), \\\n func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start)\n else:\n results = session.query(func.min(Measurement.tobs), \\\n func.avg(Measurement.tobs), \\\n func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start)\\\n .filter(Measurement.date <= end)\n\n\n #make a query that goes back 12 months before that date\n results = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date >= prev_year).all()\n\n all_Tobs = list(np.ravel(results))\n\n return jsonify(all_Tobs)", "def getContents(product,contentlist,outfolder=None,bounds = None,\n starttime = None,endtime = None,magrange = None,\n catalog = None,contributor = None,eventid = None,\n eventProperties=None,productProperties=None,radius=None,\n listURL=False,since=None,getAll=False):\n \n if catalog is not None and catalog not in checkCatalogs():\n raise Exception,'Unknown catalog %s' % catalog\n if contributor is not None and contributor not in checkContributors():\n raise Exception,'Unknown contributor %s' % contributor\n\n if outfolder is None:\n outfolder = os.getcwd()\n\n #make the output folder if it doesn't already exist\n if not os.path.isdir(outfolder):\n os.makedirs(outfolder)\n \n #if someone asks for a specific eventid, then we can shortcut all of this stuff\n #below, and just parse the event json\n if eventid is not None:\n try:\n outfiles = readEventURL(product,contentlist,outfolder,eventid,listURL=listURL,getAll=getAll)\n return outfiles\n except Exception,errobj:\n raise Exception,'Could not retrieve data for eventid \"%s\" due to \"%s\"' % (eventid,str(errobj))\n \n #start creating the url parameters\n urlparams = {}\n urlparams['producttype'] = product\n if starttime is not None:\n urlparams['starttime'] = starttime.strftime(TIMEFMT)\n if endtime is None:\n urlparams['endtime'] = ShakeDateTime.utcnow().strftime(TIMEFMT)\n if endtime is not None:\n urlparams['endtime'] = endtime.strftime(TIMEFMT)\n if starttime is None:\n urlparams['starttime'] = ShakeDateTime(1900,1,1,0,0,0).strftime(TIMEFMT)\n\n #if specified, only get events updated after a particular time\n if since is not None:\n urlparams['updatedafter'] = 
since.strftime(TIMEFMT)\n\n if bounds is not None and radius is not None:\n raise Exception,\"Choose one of bounds or radius, not both\"\n \n #we're using a rectangle search here\n if bounds is not None:\n urlparams['minlongitude'] = bounds[0]\n urlparams['maxlongitude'] = bounds[1]\n urlparams['minlatitude'] = bounds[2]\n urlparams['maxlatitude'] = bounds[3]\n\n #fix possible issues with 180 meridian crossings\n minwest = urlparams['minlongitude'] > 0 and urlparams['minlongitude'] < 180\n maxeast = urlparams['maxlongitude'] < 0 and urlparams['maxlongitude'] > -180\n if minwest and maxeast:\n urlparams['maxlongitude'] += 360\n\n if radius is not None:\n urlparams['latitude'] = radius[0]\n urlparams['longitude'] = radius[1]\n urlparams['maxradiuskm'] = radius[2]\n\n if magrange is not None:\n urlparams['minmagnitude'] = magrange[0]\n urlparams['maxmagnitude'] = magrange[1]\n \n if catalog is not None:\n urlparams['catalog'] = catalog\n if contributor is not None:\n urlparams['contributor'] = contributor\n\n #search parameters we're not making available to the user (yet)\n urlparams['orderby'] = 'time-asc'\n urlparams['format'] = 'geojson'\n params = urllib.urlencode(urlparams)\n url = URLBASE % params\n #fh = urllib2.urlopen(url)\n fh = getURLHandle(url)\n feed_data = fh.read()\n fh.close()\n fdict = json.loads(feed_data)\n outfiles = []\n earthquakes_features = []\n for feature in fdict['features']:\n if eventProperties is not None:\n skip=False\n for key,value in eventProperties.iteritems():\n if not feature['properties'].has_key(key):\n skip=True\n break\n else:\n fvalue = feature['properties'][key]\n if fvalue is None:\n skip=True\n break\n if fvalue.lower() != value.lower():\n skip=True\n break\n if skip:\n continue\n eid = feature['id']\n lat,lon,depth = feature['geometry']['coordinates']\n mag = feature['properties']['mag']\n efiles = readEventURL(product,contentlist,outfolder,eid,listURL=listURL,productProperties=productProperties)\n # outfiles += efiles\n # outfiles.append(efiles)\n feature['shakemap_url'] = efiles\n earthquakes_features.append(feature)\n \n\n return earthquakes_features", "def create_analysis():\n \n date_now = datetime.now()\n for analysis in Analysis.objects.filter(activated=True):\n\t\n\tif analysis.last_report == None or analysis.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[analysis.interval]):\n\t \n\t if analysis.last_report != None and analysis.interval == 'n':\n\t\tcontinue\n\t \n\t results = []\n\t for report in analysis.queries.filter(activated=True):\n\t\t\n\t\tif analysis.date_from != None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to, run_date__gte=analyses.date_from).order_by('run_date') \n\t\telif analysis.date_from == None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to).order_by('run_date')\n\t\telif analysis.date_from != None and analysis.date_to == None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__gte=analyses.date_from).order_by('run_date')\n\t\telse:\n\t\t report_results = ReportResult.objects.filter(report=report).order_by('run_date')\n\t\t\n\t\t# create output from mongo output\n\t\toutput_result = OutputResult(report=report.title)\n\t\toutput_result.date_array = []\n\t\toutput_result.output_array = []\n\t\tprint \"\\n KOLIK: \"+ str(output_result.output_array)\n\t\tfor result in report_results:\n\t\t 
output_result.date_array.append(result.run_date)\n\t\t #print result.output\n\t\t #print \"\\nouttest: \"+str(output_result.output_array)\n\t\t mongo_output = OutputMongo(result.output)\n\t\t output_result.output_array.append(mongo_output.getoutput())\n\n\t\tprint \"out: \",output_result.output_array\n\t\tresults.append(output_result) \n\n\n\t #print results[0].output_array\n\t #print \"\\n\\n\"\n\t #print results[1].output_array\n\t # process outputs\n\t if not process_output_reports(results, analysis, date_now):\n\t\tprint \"Error in execute analysis: %s\" % (analysis.title)\n\t\tcontinue\n\t \n\t if analysis.interval != 'n':\n\t\tif analysis.date_to != None:\n\t\t analysis.date_to = analysis.date_to + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\tif analysis.date_from != None:\n\t\t analysis.date_from = analysis.date_from + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\t \n return True", "def future_date_to_json_obj(start_date):\r\n\r\n json_obj = {\r\n \"start\": pd.to_datetime(start_date).strftime(format=\"%Y-%m-%d\"),\r\n \"target\": []\r\n }\r\n\r\n return json_obj", "def getDatafromWebService(self,region, station, start_date, end_date):\n #construct filename in the format \"region_station_startdate_enddate.json\" with no spaces and \"-\"\n \"\"\"\n filename = region + \"_\" + station+ \"_\" + start_date + \"_\" + end_date + \".json\"\n filename = filename.replace(\" \",\"\")\n filename = filename.replace(\"-\",\"\")\n print (\"filename: \"+filename)\n \"\"\"\n #date format for getting data from web service = yy/mm/dd\n obj = RegionData()\n stationcode = obj.getStaionCode(region, station)\n newStart_Date = datetime.datetime.strptime(start_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n newEnd_Date = datetime.datetime.strptime(end_date, \"%m/%d/%Y\").strftime(\"%Y-%m-%d\")\n\n server = SOAPpy.SOAPProxy(\"http://cdmo.baruch.sc.edu/webservices2/requests.cfc?wsdl\")\n\n #stationcode=\"pdbjewq\"\n responsedata = server.exportAllParamsDateRangeXMLNew(stationcode, newStart_Date, newEnd_Date,'*')\n #responsedata = server.exportAllParamsDateRangeXMLNew('pdbjewq','2014-12-30', '2014-12-31', '*')\n\n # print responsedata\n pythonObject = SOAPpy.Types.simplify(responsedata)\n #jsonObject = json.dumps(pythonObject)\n #assert type(jsonObject) == str\n dataArray = pythonObject[\"returnData\"][\"data\"] # returns { [{...},{....},.....]}\n\n #data from webservice has date format mm/dd/yy = 12/31/2014\n #print(dataArray)\n\n return json.dumps(dataArray)\n \"\"\"\n print (dataArray)\n self.dataToJson(dataArray, filename) # store the data into a json file\n #store data into rawdata collection\n \n rawObj =RawData()\n rawObj.insertRawStationData(region,station,start_date,end_date,dataArray)\n \n #return filename # return the json filename where data is stored\n \"\"\"", "def zeetemps(start_date):\n print(\"server received request for tobs stats start to end of data...\")\n # correct for dates before the start of our data\n if start_date < '2010-01-01':\n start_date = '2010-01-01'\n # set end date\n end_date = '2017-08-23'\n range_df = temps_df[(temps_df['date'] >= start_date) & (temps_df['date'] <= end_date)]\n lowest = range_df['tobs'].min()\n highest = range_df['tobs'].max()\n average = range_df['tobs'].mean()\n output = {'TMIN': lowest, 'TMAX': highest, 'TAVG': average}\n return jsonify(output)", "def process():\n jsonObj = request.get_json()\n\n # Start by pulling out the date toggle state\n for key, value in jsonObj.iteritems():\n if isinstance(value, dict):\n for k in 
value.keys():\n if k == \"toggledaterange\":\n datetoggle = value[k]\n del jsonObj[key][k]\n\n # Next modify the date\n if datetoggle == False:\n jsonObj[\"date\"] = jsonObj[\"date\"][\"sdate\"]\n else:\n l = []\n start = jsonObj[\"date\"][\"start\"].encode('unicode-escape')\n end = jsonObj[\"date\"][\"end\"].encode('unicode-escape')\n l.append(str({\"start\": start, \"end\": end}))\n jsonObj[\"date\"] = l\n\n # Start an html list\n htmlResult = \"<ul>\"\n # Trim whitespace from the values of the json object\n for key, value in jsonObj.iteritems():\n # The value is a string\n if isinstance(value, basestring):\n stripped = value.strip(' \\t\\n\\r')\n htmlResult += \"<li><b>\" + key + \"</b>: \" + stripped + \"</li>\"\n # The value is a list\n else:\n stripped = []\n htmlResult += \"<li><b>\" + key + \"</b>:<ul>\"\n for item in value:\n item = item.strip(' \\t\\n\\r')\n stripped.append(item)\n htmlResult += \"<li>\" + item + \"</li>\"\n htmlResult += \"</ul></li>\"\n jsonObj[key] = stripped\n htmlResult += \"</ul>\"\n # The html has to be packaged in the json object for Flask to return it in the response.\n # Move the rest of the json for the database to another variable.\n jsonForDB = jsonObj\n htmlResult = {\"htmlResult\": htmlResult}\n jsonObj.update(htmlResult)\n jsonResult = json.dumps(jsonObj, sort_keys=False, indent=4, separators=(',', ': '))\n\n # Change slashes to commas in the path\n jsonForDB[\"path\"] = jsonForDB[\"path\"].replace(\"/\", \",\")\n jsonForDB = json.dumps(jsonForDB, sort_keys=False, indent=4, separators=(',', ': '))\n\n # Build a pymongo command to insert the data in the database. This should probably be moved \n # to a separate function. Database data will not be saved unless active is set to True.\n active = True\n if active == True:\n client = MongoClient(db_config)\n db = client['Publications']\n publications = db['Publications']\n # Straightforward insert -- publications.insert(jsonForDB)\n # Upsert is better because it works for add and edit\n id = jsonForDB.pop(\"_id\")\n publications.update({\"_id\": id}, {\"$set\": jsonForDB}, upsert=True)\n\n # Return the Ajax response\n return jsonResult", "def main(data, eddy, eddy_input):\n \n data_json = {\n 'eddy_input_flag':eddy_input.KnowsParameters(),\n 'eddy_input':eddy_input.GetParameters(),\n \n 'data_file_eddy':data['subj_id'],\n 'data_file_mask':data['mask_id'],\n 'data_file_bvals':data['bvals_id'],\n 'data_no_dw_vols':data['no_dw_vols'].tolist(),\n 'data_no_b0_vols':data['no_b0_vols'].tolist(),\n 'data_no_PE_dirs':data['no_PE_dirs'],\n 'data_protocol':data['protocol'].tolist(),\n 'data_no_shells':data['no_shells'].tolist(),\n 'data_unique_bvals':data['unique_bvals'].tolist(),\n 'data_unique_pes':data['unique_pedirs'].tolist(),\n 'data_eddy_para':data['eddy_para'].tolist(),\n 'data_vox_size':data['vox_size'][0:3].tolist(),\n\n 'qc_path':data['qc_path'],\n 'qc_mot_abs':round(eddy['avg_abs_mot'], 2),\n 'qc_mot_rel':round(eddy['avg_rel_mot'], 2),\n 'qc_params_flag':eddy['paramsFlag'],\n 'qc_params_avg':eddy['avg_params'].tolist(),\n 'qc_s2v_params_flag':eddy['s2vFlag'],\n 'qc_s2v_params_avg_std':eddy['avg_std_s2v_params'].tolist(),\n 'qc_field_flag':eddy['fieldFlag'],\n 'qc_vox_displ_std':eddy['std_displacement'].tolist(),\n 'qc_ol_flag':eddy['olFlag'],\n 'qc_outliers_tot':eddy['tot_ol'],\n 'qc_outliers_b':eddy['b_ol'].tolist(),\n 'qc_outliers_pe':eddy['pe_ol'].tolist(),\n 'qc_cnr_flag':eddy['cnrFlag'],\n 'qc_cnr_avg':eddy['avg_cnr'].tolist(),\n 'qc_cnr_std':eddy['std_cnr'].tolist(),\n 
'qc_rss_flag':eddy['rssFlag'],\n }\n\n # Write dictionary to json\n with open(data['qc_path'] + '/qc.json', 'w') as fp:\n json.dump(data_json, fp, sort_keys=True, indent=4, separators=(',', ': '))", "def create_dicts(self):\n \n # remove this string from filename to make output file names more manageable\n pre_output1 = self.file1.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n pre_output2 = self.file2.replace(\"_Guys121919_CGH_1100_Jul11\", '')\n \n # Build the output file name.\n # if prefix is present add it\n if self.out_file_prefix is not None:\n # concatenate prefix, filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = self.out_file_prefix+pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n # if no prefix don't add it!\n else:\n # concatenate filenames and dyes into output filename file1_file1_dye_file2_file2_dye.txt\n self.outputfilename = pre_output1.replace(\".txt\", '') + \"_\" + self.file1_dye + \"_\" + pre_output2.replace(\".txt\", '') + \"_\" + self.file2_dye + \".txt\"\n\n # add temp to end of file name to create a temporary output filename\n self.tempoutputfilename = self.outputfilename.replace(\".txt\", '') + \"temp.txt\"\n\n # open temp output file\n self.tempoutputfile = open(self.outputfolder + self.tempoutputfilename, 'w')\n\n \n # open FE files\n file1_open = open(self.chosenfolder + self.file1, 'r')\n file2_open = open(self.chosenfolder + self.file2, 'r')\n\n # open file1 and create a dict of the features.\n for linenumber, line in enumerate(file1_open):\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file1_dict[int(splitline[1])] = line\n # get n of rows in file1 (take the linenumber of the last line)\n self.file1_len = linenumber\n\n # repeat for features in second file but first writing the feparam and stats to temp file - when pairing with control this ensures the \"header\" comes from the test (file2) not control (file1), NB NEITHER ARE ACCURATE!!!!\n for linenumber, line in enumerate(file2_open):\n if linenumber < 10:\n self.tempoutputfile.write(line)\n # then add all features to a dictionary, with the unique feature number as a key\n if linenumber >= 10:\n splitline = line.split('\\t')\n self.file2_dict[int(splitline[1])] = line\n # get n of rows in file2\n self.file2_len = linenumber\n\n # close files\n file1_open.close()\n file2_open.close()", "def export_jql_events(self, output_file, from_date, to_date, event_selectors=None, output_properties=None,\n timezone_offset=0, format='json', compress=False):\n events = self.query_jql_events(from_date=from_date, to_date=to_date, event_selectors=event_selectors,\n timezone_offset=timezone_offset, output_properties=output_properties,\n format=format)\n\n self._export_jql_items(events, output_file, format=format, compress=compress)", "def make_json(prefix, input_dir):\n # get list of files\n file_list = os.listdir(input_dir)\n # set reference sequence\n tracklist = {'formatVersion': 1,\n 'refSeqs': '%s.ref.fa.fai' % prefix,\n 'tracks': []}\n # add reference sequence track to tracklist.json\n tracklist['tracks'].append({\"category\": \"Reference sequence\",\n \"key\": \"Reference sequence\",\n \"label\": \"Reference sequence\",\n \"type\": \"SequenceTrack\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/IndexedFasta\",\n \"urlTemplate\": \"%s.ref.fa\" % prefix,\n \"refSeqOrder\": \"False\"})\n tracklist['tracks'].append({\"category\": \"Consensus sequence\",\n \"key\": 
\"Consensus sequence\",\n \"label\": \"Consensus sequence\",\n \"type\": \"SequenceTrack\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/IndexedFasta\",\n \"urlTemplate\": \"%s.cons.fa\" % prefix,\n \"refSeqOrder\": \"False\"})\n # add bigwig track to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Coverage\",\n \"label\": \"Coverage\",\n \"type\": \"JBrowse/View/Track/Wiggle/XYPlot\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BigWig\",\n \"autoscale\": \"local\",\n \"urlTemplate\": \"%s.sorted.bw\" % prefix\n })\n # add BAM Sequence Coverage to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Sequence reads (SNPs/Coverage)\",\n \"label\": \"Sequence reads (SNPs/Coverage)\",\n \"type\": \"JBrowse/View/Track/SNPCoverage\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BAM\",\n \"urlTemplate\": \"%s.sorted.capped.bam\" % prefix,\n \"cacheMismatches\": \"True\",\n \"chunkSizeLimit\": \"5000000\"\n })\n # add BAM Sequence Alignments to trackList.json\n tracklist['tracks'].append({\"category\": \"Sequence data\",\n \"key\": \"Sequence reads (Alignment)\",\n \"label\": \"Sequence reads (Alignment)\",\n \"type\": \"JBrowse/View/Track/Alignments2\",\n \"storeClass\": \"JBrowse/Store/SeqFeature/BAM\",\n \"urlTemplate\": \"%s.sorted.capped.bam\" % prefix,\n # add bigwig histogram option\n \"cacheMismatches\": \"True\",\n \"chunkSizeLimit\": \"5000000\"\n })\n # add GFF3 file to trackList.json\n tracklist['tracks'].append({\"category\": \"Annotation\",\n \"key\": \"Genbank annotation\",\n \"label\": \"Genbank annotation\",\n \"type\": \"JBrowse/View/Track/CanvasFeatures\",\n \"urlTemplate\": \"%s.gff3.gz\" % prefix,\n \"style\": {\n \"_defaultHistScale\": 4,\n \"_defaultLabelScale\": 30,\n \"_defaultDescriptionScale\": 120,\n # Comma-separated list of case-insensitive feature tags to use\n # for showing the feature's label.\n # The first one found will be used. Default 'name,id'.\n \"label\": \"product,id\",\n # style→description\tComma-separated list of case-insensitive\n # feature tags to check for the feature's long description.\n # The first one found will be used. Default 'note,description'.\n # If blank no description is used.\n \"description\": \"note, description\"\n },\n })\n\n json_path = os.path.join(input_dir, 'trackList.json')\n with open(json_path, 'wt') as output_handle:\n json_raw_str = json.dumps(tracklist, indent=4)\n output_handle.write(json_raw_str)\n return 'trackList.json'", "def compile_end_beg_dates(infile):\r\n filelist = glob.glob(infile)\r\n f = {}\r\n\r\n # iterate through list of relevant files\r\n for infile in filelist:\r\n f[getfilename(infile)] = new_trans_imp(infile)\r\n\r\n dflist = []\r\n for key, val in f.items():\r\n if val is not None:\r\n dflist.append((key, val.index[0], val.index[-1]))\r\n\r\n df = pd.DataFrame(dflist, columns=['filename', 'beginning', 'end'])\r\n return df", "def generate_data(out_fname, data_directory):\n def store_result(duration, loci_number):\n \"\"\" Store result of current timing run\n \"\"\"\n print(' %ds for %d loci' % (duration, loci_number))\n\n if os.path.isfile(out_fname):\n with open(out_fname, 'r') as fd:\n cur = json.load(fd)\n else:\n cur = []\n\n with open(out_fname, 'w') as fd:\n cur.append((loci_number, duration))\n json.dump(cur, fd)\n\n for fn in os.listdir(data_directory):\n fname = os.path.join(data_directory, fn)\n\n print('Loading \"%s\"...' 
% fname, end=' ', flush=True)\n contacts = np.loadtxt(fname)\n print('Done')\n\n start = time.time()\n try:\n apply_shrec3d(contacts)\n except:\n print('>>> Some error occured')\n traceback.print_exc()\n end = time.time()\n\n store_result(end-start, contacts.shape[0])", "def pip_hmt(start_timestamp, end_timestamp, city='Torino'):\n\n pipeline = [\n {\"$match\":\n {\"city\": city}\n #{\"init_time\": {\"$gte\": start_timestamp}},\n #{\"final_time\": {\"$lte\": end_timestamp}}\n },\n {\"$group\": {\n \"_id\": {\"day\": {\"$dayOfYear\": \"$init_date\"}, \"year\": {\"$year\": \"$init_date\"}},\n \"plate\": {\"$addToSet\": \"$plate\"}},\n\n },\n {\n \"$unwind\": \"$plate\"\n },\n {\n \"$group\": {\n \"_id\": {\"day\": \"$day\", \"year\": \"$year\"},\n \"Count\": {\"$sum\": 1}\n }\n }\n\n ]\n\n return pipeline", "def run(params, conn, outputfile):\n date_begin = parse(params['date_begin'] + ' 00:00:00 +0700')\n date_end = parse(params['date_end'] + ' 23:59:59 +0700')\n domain_id = params['domain_id']\n authority_ids = params['authority_ids']\n\n covid_report_type_id = fetch_report_type_id(conn, 'surveillance-covid-19', domain_id)\n main_data = fetch_data(conn, date_begin, date_end, authority_ids, domain_id, covid_report_type_id)\n\n covid_report_type_id = fetch_report_type_id(conn, 'surveillance-covid-19-followup', domain_id)\n follow_data = fetch_data(conn, date_begin, date_end, authority_ids, domain_id, covid_report_type_id)\n\n line_list = join(main_data, follow_data)\n tabular(line_list)\n\n if len(line_list) == 0:\n return False\n\n df = pandas.DataFrame(line_list)\n df['date'] = df['date'].dt.tz_convert(tz)\n df['date'] = df['date'].dt.strftime('%d/%m/%Y %H:%M')\n writer = pandas.ExcelWriter(outputfile)\n df.to_excel(writer, 'covid-19', columns=['report_id', 'name', 'gender', 'age',\n 'village_no', 'village', 'tumbols', 'amphurs',\n 'arrival_date_village', 'mobile_phone',\n 'risk_factor', 'symptom_check', 'symptom_covid',\n 'date', 'latitude', 'longitude',\n '01', '02', '03', '04', '05', '06',\n '07', '08', '09', '10', '11', '12', '13', '14'], index=False)\n ldf = pandas.DataFrame(flat(main_data))\n ldf['date'] = ldf['date'].dt.tz_convert(tz)\n ldf.sort_values(by=['date'], inplace=True)\n ldf['date'] = ldf['date'].dt.strftime('%d/%m/%Y %H:%M')\n\n def is_followup(row):\n return row['report_id'] != row['group_id']\n\n ldf['followup'] = ldf.apply(is_followup, axis=1)\n ldf.to_excel(writer,\n 'all',\n columns=['report_id', 'group_id', 'followup', 'name', 'gender', 'age',\n 'village_no', 'village', 'tumbols', 'amphurs',\n 'arrival_date_village', 'mobile_phone',\n 'risk_factor', 'symptom_check', 'symptom_covid',\n 'total_times', 'activity_other',\n 'date', 'latitude', 'longitude'],\n index=False)\n writer.save()\n return True", "def SETOBS(start, end):\n session = Session(engine)\n # Query all passengers\n\n SETBOS = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).group_by(Measurement.date).all() \n\n # Convert list of tuples into normal list\n all_SETOBS = list(np.ravel(SETBOS))\n\n return jsonify(all_SETOBS)", "def startEnd(start, end):\n # * Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.\n # * When given the start and the end date, calculate the `TMIN`, `TAVG`, and `TMAX` for dates between the start and end date inclusive.\n session = Session(engine)\n results = 
session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date>=start, Measurement.date<=end).all()\n session.close()\n s_e_descriptors = list(np.ravel(results))\n \n return jsonify(s_e_descriptors)", "def main():\n yesterday_datestring = (datetime.date.today() - datetime.timedelta(1)).isoformat()\n parser = argparse.ArgumentParser(description='generate TimeseriesArrays on local backend')\n parser.add_argument('--url', default=\"https://datalogger-api.tirol-kliniken.cc/DataLogger\", help=\"url of DataLogger Webapplication\")\n parser.add_argument('--logdir', default=\"/data1/haproxy_daily/\", help=\"directory where to find day sorted haproxylogs\")\n parser.add_argument(\"-b\", '--back', help=\"how many days back from now\")\n parser.add_argument(\"-s\", '--startdate', help=\"start date in isoformat YYY-MM-DD\")\n parser.add_argument(\"-e\", '--enddate', default=yesterday_datestring, help=\"stop date in isoformat YYY-MM-DD\")\n parser.add_argument(\"-q\", '--quiet', action='store_true', help=\"set to loglevel ERROR\")\n parser.add_argument(\"-v\", '--verbose', action='store_true', help=\"set to loglevel DEBUG\")\n args = parser.parse_args()\n if args.quiet is True:\n logging.getLogger(\"\").setLevel(logging.ERROR)\n if args.verbose is True:\n logging.getLogger(\"\").setLevel(logging.DEBUG)\n if (args.back is not None) == (args.startdate is not None):\n logging.error(\"option -b and -e are mutual exclusive, use only one\")\n sys.exit(1)\n startdate = None\n if args.back is not None:\n startdate = (datetime.date.today() - datetime.timedelta(int(args.back))).isoformat()\n elif args.startdate is not None:\n startdate = args.startdate\n else:\n logging.error(\"you have to provide either -b or -s\")\n sys.exit(1)\n # lets get started\n datalogger = DataLoggerWeb(args.url)\n project = \"haproxy\"\n tablename = \"http_host\"\n baseurl = \"%s/upload_raw_file/\" % args.url\n logdir = args.logdir # where to find haproxy logs\n keys = (\"http_host\", )\n values = (\"bytes_read\", \"rsp_1xx\", \"rsp_2xx\", \"rsp_3xx\", \"rsp_4xx\", \"rsp_5xx\", \"rsp_other\", \"srv_queue\", \"backend_queue\", \"actconn\", \"feconn\", \"beconn\", \"srv_conn\", \"retries\", \"tq\", \"tw\", \"tc\", \"tr\", \"tt\", \"hits\")\n ts_keyname = \"ts\"\n for datestring in datewalk(startdate, args.enddate):\n caches = datalogger.get_caches(project, tablename, datestring)\n if caches[\"tsa\"][\"raw\"] is not None:\n logging.info(\"Skipping this datestring, raw data is already available\")\n continue\n try:\n stringio = generate_datalogger_csv(logdir, datestring, keys, values, ts_keyname)\n #upload data\n files = {'myfile': stringio}\n url = \"/\".join((baseurl, project, tablename, datestring))\n logging.info(\"calling %s\", url)\n response = requests.post(url, files=files)\n print response.content\n except StandardError as exc:\n logging.error(\"Exception on file datestring %si, skipping this date\", datestring)\n except zlib.error as exc:\n logging.error(exc)", "def get_data(table_name, end, num, start=None):\n if start == None:\n if table_name == \"days\": start = end - timedelta(days=num-1) \n if table_name == \"weeks\": start = end - timedelta(weeks=num-1) \n if table_name == \"months\": start = end - relativedelta(months=+num-1) \n if table_name == \"years\": start = end - relativedelta(years=+num-1) \n else: \n start = days.get_entry(table_name, start).date\n \n dates = []\n data = []\n weather = []\n density = []\n \n while start <= end:\n entry = 
days.get_entry(table_name, start)\n data.append(entry.sentiment)\n \n if table_name == \"days\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(days=1)\n if table_name == \"weeks\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(weeks=1) \n if table_name == \"months\": \n dates.append(entry.date.strftime(\"%B %Y\"))\n start = start + relativedelta(months=+1) \n if table_name == \"years\": \n dates.append(entry.date.strftime(\"%Y\"))\n start = start + relativedelta(years=+1) \n\n # 7/15/15 is the last entry in the current weather dictionary\n num_days = (min(start, date(2015,7,15)) - entry.date).days\n temp = {entry.date + timedelta(days=i): weather_dict[entry.date + timedelta(days=i)] for i in range(num_days)}\n weather.append(float(sum(temp.values()))/float(len(temp)))\n\n if density_dict != None:\n d = max(entry.date, date(2014,7,1))\n num_days = (min(start, date(2015,7,28)) - d).days\n rho = {d + timedelta(days=i): density_dict[d + timedelta(days=i)] for i in range(num_days)}\n density.append(float(sum(rho.values()))/float(len(rho)))\n\n return dates, data, weather, density", "def gos_files_creation(annotation_file, go_namespace_studied):\n go_ontology = pronto.Ontology('http://purl.obolibrary.org/obo/go/go-basic.obo')\n\n # For each GO terms look to the namespaces associated with them.\n go_namespaces = {}\n for go_term in go_ontology:\n go_namespaces[go_term.id] = go_term.other['namespace'][0]\n\n # For each GO terms look if there is an alternative ID fo them.\n go_alt_ids = {}\n for go_term in go_ontology:\n if 'alt_id' in go_term.other:\n for go_alt in go_term.other['alt_id']:\n go_alt_ids[go_alt] = go_term.id\n\n # Genome file with genes associated with GO terms.\n df = pa.read_csv(annotation_file, sep='\\t', header=None)\n df.columns = ['Gene_Name', 'GOs']\n df.replace(np.nan, '', inplace=True)\n\n gos_in_df = []\n for gos in df['GOs']:\n for go in gos.split(','):\n if go not in gos_in_df:\n gos_in_df.append(go)\n\n df.set_index('Gene_Name', inplace=True)\n\n gene_gos = []\n for gene, row in df.iterrows():\n for go in row['GOs'].split(','):\n gene_gos.append((go, gene))\n\n dic_go_genes = {}\n for go in tqdm(gos_in_df):\n genes = []\n for gene_go in gene_gos:\n if go != '' and go not in go_namespaces:\n go = go_alt_ids[go]\n if gene_go[0] == go and go != '' and go_namespaces[go] == go_namespace_studied:\n genes.append(gene_go[1])\n if go != '':\n dic_go_genes[go] = genes\n\n print(len(dic_go_genes))\n\n delete_keys = []\n for go in dic_go_genes:\n if len(dic_go_genes[go]) < 4:\n delete_keys.append(go)\n\n for key in delete_keys:\n del dic_go_genes[key]\n print(len(dic_go_genes))\n\n df_go = pa.DataFrame.from_dict(dic_go_genes, orient='index')\n df_go.insert(0, 'Description', 'GO_terms')\n\n df_go.to_csv('go_gene.gmt', sep='\\t', header=False)\n\n df.reset_index(inplace=True)\n df_query_go = pa.concat([pa.Series(row['Gene_Name'], row['GOs'].split(','))\n for _, row in df.iterrows()]).reset_index()\n df_query_go.columns = ['GOs', 'Gene_Name']\n df_query_go = df_query_go[['Gene_Name', 'GOs']]\n df_query_go.to_csv('query_go.tsv', sep='\\t', index=False)", "def tempStatsStartEnd(start,end):\n\n # Open sessions\n session = Session(bind=engine)\n\n # Split the start and end date entered by the user in YYYY-MM-DD format\n dataStartArray = start.split(\"-\")\n dataEndArray = end.split(\"-\")\n (startyear,startmonth,startday) = dataStartArray\n (endyear,endmonth,endday) = dataEndArray\n 
startDate=f'{startyear}-{startmonth}-{startday}'\n endDate=f'{endyear}-{endmonth}-{endday}'\n\n # Query DB to get the max, avg and min temperature in between the dates selected\n results=session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= startDate).filter(Measurement.date <= endDate).all()\n\n # Unpacking the results\n for value in results:\n (tempMin,tempAvg,tempMax)=value\n\n # Creating a dictionary that holds the values queried\n startEndTobs={\n 'minimum temperature':round(tempMin,2),\n 'average temperature':round(tempAvg,2),\n 'maximum temperature':round(tempMax,2)\n }\n \n # Main API dictionary that just has an additional info key for the user to know what is being queried\n startEndAPI={\n 'info': f'Maximum, average and minimum temperature in F in Hawaii from {start} to {end}',\n 'results':startEndTobs\n }\n\n # Returing the main dictionary in a JSON format API response \n return(jsonify(startEndAPI))", "def entries_from_goes_ts_files(*files, default_waveunit=None, source=None):\n\n\n \"\"\"\n ts_goes = ts.TimeSeries(file)\n statinfo = os.stat(file)\n entry = DatabaseEntry(path=file)\n entry.size = statinfo.st_size\n\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'\n entry.instrument = ts_goes.meta.get('TELESCOP').values()\n entry.instrument = ts_goes.meta.get('TELESCOP').values()\n\n entry.wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n entry.wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n #\n entry.observation_time_start = ts_goes.meta.get('date-beg').values()[0]\n entry.observation_time_end = ts_goes.meta.get('date-end').values()[0]\n\n entry.metadata = ts_goes.meta.metadata[0][2]\n\n #entry.tags = [ sunpy.database.attrs.Tag('raw') ]\n \"\"\"\n\n\n for file in files:\n headers = fits.get_header(file)\n if isinstance(file, (str, six.text_type)):\n filename = file\n else:\n filename = getattr(file, 'name', None)\n statinfo = os.stat(file)\n #print('a header')\n entry = DatabaseEntry(path=filename)\n entry.size = statinfo.st_size\n\n # Add/tweak start/end entries for GOES\n if headers[0].get('TELESCOP','') != '':\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 
'GOES 6' instead 'X-ray Detector'\n entry.instrument = headers[0]['TELESCOP']\n elif headers[1].get('TELESCOP','') != '':\n entry.instrument = headers[1]['TELESCOP']\n if (headers[0].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[0]['DATE-OBS'])\n elif (headers[1].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[1]['DATE-OBS'])\n\n if (headers[0].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[0]['DATE-END'])\n elif (headers[1].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[1]['DATE-END'])\n else:\n end_time = start_time + timedelta(days=1,seconds=-1)\n\n # Add these to the entry\n entry.observation_time_start = start_time\n entry.observation_time_end = end_time\n\n entry.wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n entry.wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n if source:\n entry.source = source\n\n entry.metadata = MetaDict(headers[1])\n #entry.tags = sunpy.database.attrs.Tag('raw')\n\n #entry = DatabaseEntry(instrument='EIT', wavemin=25.0)\n\n #return entry\n yield entry", "def create_ogse_db(args):\n if args.ref_diode:\n # read reference-diode data\n xds = read_ref_diode(args.ogse_dir, args.ref_diode, args.verbose)\n\n # create new database for reference-diode data\n xds.to_netcdf(args.ogse_dir / DB_REF_DIODE,\n mode='w', format='NETCDF4',\n group='/gse_data/ReferenceDiode')\n\n if args.wav_mon:\n # read reference-diode data\n xds = read_wav_mon(args.ogse_dir, args.wav_mon, args.verbose)\n # create new database for reference-diode data\n xds.to_netcdf(args.ogse_dir / DB_WAV_MON,\n mode='w', format='NETCDF4',\n group='/gse_data/WaveMonitor')", "def generate_files(self, output_dir: str) -> None:\n full_filename = os.path.join(output_dir, self.json_file)\n with open(full_filename, 'w', encoding='utf-8') as output_file:\n json.dump(self.zidb, output_file, indent=2)\n print(file=output_file) # add terminating newline\n logging.info(\"Created %s\", full_filename)", "def observatories():\n\n obs_db = {}\n\n obs_db['PWT-Oxford'] = { 'long':'-01:15:00', \\\n 'lat':'+51:45:00', \\\n 'altitude-metres':130.0, \\\n 'timezone':'Europe/London' }\n\n obs_db['LaPalma'] = { 'lat':'+28:45:00', \\\n 'long':'-17:53:00', \\\n 'altitude-metres':2326, \\\n 'timezone':'Atlantic/Canary' }\n \n obs_db['Paranal'] = { 'lat':'-24:37:00', \\\n 'long':'-70:24:00', \\\n 'altitude-metres':2635, \\\n 'timezone':'America/Santiago' }\n\n obs_db['LaSilla'] = { 'lat':'-29:15:00', \\\n 
'long':'-70:44:00', \\\n 'altitude-metres':2380, \\\n 'timezone':'America/Santiago' }\n\n obs_db['MaunaKea'] = { 'lat':'+19:50:00', \\\n 'long':'-155:28:00', \\\n 'altitude-metres':4190, \\\n 'timezone':'Pacific/Honolulu' }\n \n obs_db['SidingSpring'] = { 'lat':'-31:16:00', \\\n 'long':'+149:04:00', \\\n 'altitude-metres':1149, \\\n 'timezone':'Australia/Sydney' }\n \n obs_db['KittPeak'] = { 'lat':'+31:58:00', \\\n 'long':'-111:36:00', \\\n 'altitude-metres':2096, \\\n 'timezone':'America/Phoenix' }\n\n obs_db['CalarAlto'] = { 'lat':'+37:13:25', \\\n 'long':'-2:32:47', \\\n 'altitude-metres':2168, \\\n 'timezone':'Europe/Madrid' }\n \n obs_db['Gemini-N'] = { 'lat':'+19:49:26', \\\n 'long':'-155:28:09', \\\n 'altitude-metres':4213, \\\n 'timezone':'Pacific/Honolulu' }\n\n obs_db['Gemini-S'] = { 'lat':'-30:14:27', \\\n 'long':'-70:44:12', \\\n 'altitude-metres':2722, \\\n 'timezone':'America/Santiago' }\n\n return obs_db", "def range_temp(start,end):\n year, month, date = map(int, start.split('-'))\n date_start = dt.date(year,month,day)\n year2, month2, date2 = map(int, end.split('-'))\n date_end = dt.date(year2,month2,day2)\n # Query for tobs for definied date range\n results = session.query(func.min(Measurement.tobs),func.max(Measurement.tobs).\\\n func.avg(Measurement.tobs)).filter(Measurement.date >= date_start).filter(Measurement.date <= date_end).all()\n data = list(np.ravel(results))\n return jsonify(data)", "def main():\n \n Y1, Y2 = 2005, 2017 ### range with coordinates supplied in pre-2018 generated archive\n\n if len(sys.argv) > 1 and int(sys.argv[1]) > 0:\n Y1 = int(sys.argv[1])\n \n if len(sys.argv) > 2 and int(sys.argv[2]) > Y1:\n Y2 = int(sys.argv[2])\n \n with open('data/audit.log','w') as output:\n for Y in range(Y1, Y2):\n df = pd.read_csv('data/{}.csv'.format(Y), low_memory = False)\n output.write('\\n--- {} --------------------\\n'.format(Y))\n\n # remove `deleted` records\n df['deleted'] = df['deleted'].apply(yes_no)\n df = df[df['deleted'] == 0]\n\n # remove misc misdemeanors\n df = df[~df['category'].isin(drop)]\n\n # validate date and expand into Y,N,D,W,H\n df['dt'] = df['incident_date'].apply(extract)\n df = df[~df['dt'].isnull()]\n\n # convert from plane state to longitude-latitude\n df['ll'] = df.apply(to_lnglat, axis = 1)\n\n # init features\n features = df.loc[:,['category','stat','address','city','zip']]\n features['id'] = df['incident_id']\n dt = ['year','month','day','weekday','hour']\n for i in range(len(dt)):\n features[dt[i]] = df['dt'].apply(lambda x: x[i] )\n\n features['lng'] = df['ll'].apply(lambda x: x[0])\n features['lat'] = df['ll'].apply(lambda x: x[1])\n\n features['gang'] = df['gang_related'].apply(yes_no)\n features['category'] = df['category'].apply(collapse)\n cat = set(features.groupby(['category']).size().reset_index(name='count')['category'].tolist())\n output.write('Categories: {}\\n'.format(len(cat)))\n\n output.write('Date miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['year'] > 2000) & (~features['weekday'].isnull())])/len(features))))\n output.write('Location miss: {:.4f}%\\n'\\\n .format(100 * (1 - len(features[(features['zip'] > 0) | (features['lat'] > 0)])/len(features))))\n\n # keep records with valid date\n features['date'] = df['dt'].apply(lambda x: datetime.date(x[0], x[1], x[2]))\n features = features[(features['year'] > 2000) & (~features['weekday'].isnull())]\n output.write('Time miss: {:.4f}%\\n'.format(100 * len(features[features['hour'] == -1])/len(features)))\n\n # potential `time-unknown` issue\n 
output.write('Hour ZERO: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 0])/len(features)))\n output.write('Hour NOON: {:.4f}%\\n'.format(100 * len(features[features['hour'] == 12])/len(features)))\n\n features = features[(features['zip'] > 0) | (features['lat'] > 0)]\n\n # get the best possible coordinates + zipcode assessment\n features[['zip','lng','lat']] = features[['zip','lng','lat']].apply(fix_location, axis = 1)\n output.write('Failed location: {:.4f}%\\n'.format(100 * len(features[features['zip'].isnull()])/len(features)))\n features = features[~features['zip'].isnull()]\n features['zip'] = df['zip'].apply(lambda x: str(x)[:5])\n \n # normalize city attr\n features = features.join(zipcodes[['zip','city']].set_index('zip'), on = 'zip', lsuffix = '_orig', rsuffix = '')\n features.loc[features['city'].isnull(), 'city'] = features.loc[features['city'].isnull(), 'city_orig']\\\n .apply(lambda x: x if type(x) == float else ' '.join([l[0].upper() + l[1:] for l in x.split()]))\n\n # reduce to LA bounding-box\n features = features[(features['lng'] > -119) & (features['lng'] < -116)]\n features = features[(features['lat'] > 32) & (features['lat'] < 35)]\n\n # save csv\n features[fields].to_csv('data/F{}.csv'.format(Y), index = False)\n features[fields].to_json('data/F{}.json'.format(Y), orient = 'records')\n output.close()", "def dates(start=None, end=None):\n\n if not end:\n end = last_year\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), \\\n func.max(Measurement.tobs)).filter(Measurement.date >= start).filter\\\n (Measurement.date <= end).all()\n return jsonify(results)", "def initialize_descriptive_json(json_filename,wk_dir,model_dir,obs_dir):\n output = {'provenance':{},'data':{},'metrics':{},'plots':{},'index': 'index.html','html':'index.html'}\n log_path = wk_dir + '/asop_coherence.log.txt'\n output['provenance'] = {'environment': get_env(),\n 'modeldata': model_dir,\n 'obsdata': obs_dir,\n 'log': log_path}\n with open(json_filename,'w') as output_json:\n json.dump(output,output_json, indent=2)\n\n return", "def build_data(self):\n from desiutil.io import combine_dicts\n # Loop on exposures\n odict = {}\n for qanight in self.qa_nights:\n for qaexp in qanight.qa_exps:\n # Get the exposure dict\n idict = write_qa_exposure('foo', qaexp, ret_dict=True)\n odict = combine_dicts(odict, idict)\n # Finish\n self.data = odict", "def get_Events(input, request):\n \n t_event_1 = datetime.now()\n \n global events\n \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n \n if os.path.exists(eventpath) == True:\n print '--------------------------------------------------------'\n \n if raw_input('Folder for requested Period:' + '\\n' + \\\n str(eventpath) + \\\n '\\n' + 'exists in your directory.' + '\\n\\n' + \\\n 'You could either:' + '\\n' + 'N: Close the program and try the ' + \\\n 'updating mode.' + '\\n' + \\\n 'Y: Remove the tree, continue the program ' + \\\n 'and download again.' + \\\n '\\n\\n' + 'Do you want to continue? 
(Y/N)' + '\\n') == 'Y':\n print '--------------------------------------------------------'\n shutil.rmtree(eventpath)\n os.makedirs(eventpath)\n \n else:\n print '--------------------------------------------------------'\n print 'So...you decided to update your folder...Ciao'\n print '--------------------------------------------------------'\n sys.exit()\n \n else:\n os.makedirs(eventpath)\n \n events = events_info(request)\n \n os.makedirs(os.path.join(eventpath, 'EVENT'))\n len_events = len(events)\n \n print 'Length of the events found based on the inputs: ' + \\\n str(len_events) + '\\n'\n \n for i in range(0, len_events):\n print \"Event No:\" + \" \" + str(i+1)\n print \"Date Time:\" + \" \" + str(events[i]['datetime'])\n print \"Depth:\" + \" \" + str(events[i]['depth'])\n print \"Event-ID:\" + \" \" + events[i]['event_id']\n try:\n print \"Flynn-Region:\" + \" \" + events[i]['flynn_region']\n except Exception, e:\n print \"Flynn-Region:\" + \" \" + \"NONE\"\n print \"Latitude:\" + \" \" + str(events[i]['latitude'])\n print \"Longitude:\" + \" \" + str(events[i]['longitude'])\n print \"Magnitude:\" + \" \" + str(events[i]['magnitude'])\n print \"-------------------------------------------------\"\n \n Event_cat = open(os.path.join(eventpath, 'EVENT', 'EVENT-CATALOG'), 'a+')\n Event_cat.writelines(str(Period) + '\\n')\n Event_cat.writelines('-------------------------------------' + '\\n')\n Event_cat.writelines('Information about the requested Events:' + '\\n\\n')\n Event_cat.writelines('Number of Events: ' + str(len_events) + '\\n')\n Event_cat.writelines('min datetime: ' + str(input['min_date']) + '\\n')\n Event_cat.writelines('max datetime: ' + str(input['max_date']) + '\\n')\n Event_cat.writelines('min magnitude: ' + str(input['min_mag']) + '\\n')\n Event_cat.writelines('max magnitude: ' + str(input['max_mag']) + '\\n')\n Event_cat.writelines('min latitude: ' + str(input['evlatmin']) + '\\n')\n Event_cat.writelines('max latitude: ' + str(input['evlatmax']) + '\\n')\n Event_cat.writelines('min longitude: ' + str(input['evlonmin']) + '\\n')\n Event_cat.writelines('max longitude: ' + str(input['evlonmax']) + '\\n')\n Event_cat.writelines('min depth: ' + str(input['min_depth']) + '\\n')\n Event_cat.writelines('max depth: ' + str(input['max_depth']) + '\\n')\n Event_cat.writelines('-------------------------------------' + '\\n\\n')\n Event_cat.close()\n \n \n for j in range(0, len_events):\n Event_cat = open(os.path.join(eventpath, 'EVENT', 'EVENT-CATALOG'), 'a')\n Event_cat.writelines(\"Event No: \" + str(j) + '\\n')\n Event_cat.writelines(\"Event-ID: \" + str(events[j]['event_id']) + '\\n')\n Event_cat.writelines(\"Date Time: \" + str(events[j]['datetime']) + '\\n')\n Event_cat.writelines(\"Magnitude: \" + str(events[j]['magnitude']) + '\\n')\n Event_cat.writelines(\"Depth: \" + str(events[j]['depth']) + '\\n')\n Event_cat.writelines(\"Latitude: \" + str(events[j]['latitude']) + '\\n')\n Event_cat.writelines(\"Longitude: \" + str(events[j]['longitude']) + '\\n')\n \n try:\n Event_cat.writelines(\"Flynn-Region: \" + \\\n str(events[j]['flynn_region']) + '\\n')\n \n except Exception, e:\n Event_cat.writelines(\"Flynn-Region: \" + 'None' + '\\n')\n \n Event_cat.writelines('-------------------------------------' + '\\n')\n Event_cat.close()\n \n Event_file = open(os.path.join(eventpath, 'EVENT', 'event_list'), 'a+')\n pickle.dump(events, Event_file)\n Event_file.close()\n \n print 'Events are saved!'\n \n print 'Length of events: ' + str(len_events) + '\\n'\n \n t_event_2 = 
datetime.now()\n t_event = t_event_2 - t_event_1\n \n print 'Time for getting and saving the events:'\n print t_event\n \n return events", "def create_gen_json(self, out_file):\n\n params = self.create_package_dict()\n with open(out_file, 'w') as fp:\n json.dump(params, fp)", "def end(start_date, end_date): \n \n # Create session and save reference to table\n session = Session(engine)\n Measurement = Base.classes.measurement\n\n # Query\n trip_query = session.query(func.avg(Measurement.tobs).label('average'), func.min(Measurement.tobs).label('min'), func.max(Measurement.tobs).label('max'))\\\n .filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n trip_list = []\n for row in trip_query:\n trip_list.append(row._asdict())\n \n return jsonify(trip_list)\n\n session.close()", "def writeIcal(calendarItems):\n\n cal = Calendar()\n cal.add('prodid', '-//Gremien Kalender//opendata.stadt-muenster.de//')\n cal.add('version', '2.0')\n\n with open(OUTPUT_FILE_CSV, 'w', newline='') as csvfile:\n csvWriter = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n csvWriter.writerow(['MeetingID', 'Start', 'Ende', 'Gremium', 'Veranstaltung', 'Ort', 'Weitere Information'])\n\n for key, session in sorted(calendarItems.items()):\n\n # Prepare event title (and convert datestrings to datetime objects with timezone)\n meetingId = session[5]\n sessionName = session[2]\n committee = session[3]\n location = session[4]\n start = datetime.strptime(session[0], \"%Y-%m-%dT%H:%M:%S%z\")\n end = datetime.strptime(session[1], \"%Y-%m-%dT%H:%M:%S%z\")\n meetingUrl = OPARL_MEETING_URL.format(meetingId)\n logging.info(\"Adding ical: %s %s %s\", start, committee, sessionName)\n\n # Create ical event (and convert datetimes to UTC)\n event = Event()\n event.add('summary', '{} - {}'.format(committee, sessionName))\n event.add('dtstart', start.astimezone(pytz.utc))\n event.add('dtend', end.astimezone(pytz.utc))\n event.add('dtstamp', datetime.now())\n event.add('description', meetingUrl)\n event.add('uid', '20220215T101010/{}@ms'.format(meetingId))\n\n organizer = vCalAddress('MAILTO:opendata@citeq.de')\n organizer.params['cn'] = vText('Stadt Münster')\n organizer.params['role'] = vText('Ratsinformationssytem')\n event['organizer'] = organizer\n event['location'] = vText(location)\n\n # Add event to calendar\n cal.add_component(event)\n\n # Add event to CSV\n csvWriter.writerow([meetingId, str(start), str(end), committee, sessionName, location, meetingUrl])\n\n\n # Write ical file\n f = open(OUTPUT_FILE_ICS, 'wb')\n f.write(cal.to_ical())\n f.close()", "def main(arguments):\n\n # The input file can be optionally encoded with gzip format:\n input_file = arguments.input_file[0]\n assert isinstance(input_file, str)\n if input_file.endswith(\".gz\"):\n _open = gzip.open\n else:\n _open = open\n with _open(input_file, \"rt\",\n encoding='utf-8') as fd:\n print(\"Loading JSON content into memory....\")\n raw = json.load(fd) # Parses all the input file.\n\n # Also the output file can be optionally encoded with gzip format:\n output_file = arguments.output_file[0]\n assert isinstance(output_file, str)\n uuid = 0\n if output_file.endswith(\".gz\"):\n _open = gzip.open\n else:\n _open = open\n with _open(output_file, \"wt\",\n encoding='utf-8') as fd:\n # for each element extracted from the input\n print(\"Generating distilled file\")\n for item in load_input(raw):\n uuid += 1 # generates incremental uuid from 1\n item['uuid'] = uuid\n fd.write(json.dumps(item,\n sort_keys=True))\n 
fd.write(\"\\n\") # one encoded document per line\n\n print(\"{} documents imported\".format(uuid))", "def run(ts):\n pgconn = get_dbconn(\"iem\")\n cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n utcnow = datetime.datetime.utcnow()\n\n cursor.execute(\n \"\"\"\n select id, ST_x(geom), ST_y(geom), coop_valid, pday, snow, snowd,\n extract(hour from coop_valid)::int as hour, max_tmpf as high,\n min_tmpf as low, coop_tmpf,\n name from summary s JOIN stations t ON (t.iemid = s.iemid)\n WHERE s.day = %s and t.network in ('IA_COOP', 'MO_COOP', 'KS_COOP',\n 'NE_COOP', 'SD_COOP', 'MN_COOP', 'WI_COOP', 'IL_COOP') and pday >= 0\n and extract(hour from coop_valid) between 5 and 10\n \"\"\",\n (ts.date(),),\n )\n\n res = {\n \"type\": \"FeatureCollection\",\n \"features\": [],\n \"generation_time\": utcnow.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n \"count\": cursor.rowcount,\n }\n for row in cursor:\n res[\"features\"].append(\n dict(\n type=\"Feature\",\n id=row[\"id\"],\n properties=dict(\n pday=p(row[\"pday\"]),\n snow=p(row[\"snow\"], 1),\n snowd=p(row[\"snowd\"], 1),\n name=row[\"name\"],\n hour=row[\"hour\"],\n high=row[\"high\"],\n low=row[\"low\"],\n coop_tmpf=row[\"coop_tmpf\"],\n ),\n geometry=dict(\n type=\"Point\", coordinates=[row[\"st_x\"], row[\"st_y\"]]\n ),\n )\n )\n\n return json.dumps(res)", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event 
for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def test_getEventsForItinerary(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n\n rv = self.json_get('/getEventsForItinerary/bbbb', date)\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert 'Itinerary for the day not found' in str(rv.data)\n\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert '{\"events\": []}' in str(rv.data)\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)", "def create_file(output_json):\n folder = \"data/\"\n filename = datetime.now().strftime(\"%d-%m-%Y\") + \"-moisture-read.json\"\n filepath = folder+filename\n\n # Create Local folder\n try:\n os.mkdir(folder)\n except OSError:\n pass\n #print(\"Directory already created or a failure occured on directory (%s)\" % folder)\n\n # Create Empty Json file if it doesnt exists\n if(Path(filepath)).exists():\n pass\n else:\n try:\n f = open(filepath, \"a\")\n f.write('{\\n\"moisture_iot_project\":[]\\n}')\n f.close()\n except Exception as e:\n print(\"Failure occured creating the JSON file (%s)\" % e)\n\n # Open Json file to append current structure\n with open(filepath) as outfile:\n data = json.load(outfile)\n\n # Get list with all dictionaries\n temp = data['moisture_iot_project']\n\n # Append current structure\n temp.append(output_json)\n\n # Reorganize List values and re-write to JSON file\n data['moisture_iot_project'] = temp\n write_json(data, filepath)", "def get_obsdate():\n\n#\n#--- read sot data\n#\n f = open(sot_directory, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n obsid_list = []\n start_date = []\n index_date = []\n for ent in data:\n temp = re.split('\\^', ent)\n obsid = temp[1]\n#\n#--- check the data are valid\n#\n try:\n atemp = re.split('\\s+', temp[13])\n mon = atemp[0]\n date = atemp[1]\n year = atemp[2][2] + atemp[2][3]\n except:\n continue\n#\n#--- convert month in letter into digit\n#\n for i in range(0, 12):\n if mon == month_list[i]:\n mon = i + 1\n break\n#\n#--- two forms of starting date: 05/23/14 and 20140523\n#\n lmon = str(mon)\n if int(mon) < 10:\n lmon = '0' + lmon\n ldate = str(date)\n if int(date) < 10:\n ldate = '0' + ldate\n\n dline = lmon + '/' + ldate + '/' + year\n iline = atemp[2] + lmon + ldate\n\n obsid_list.append(int(obsid))\n start_date.append(dline)\n index_date.append(iline)\n\n return (obsid_list, start_date, index_date)", "def export_events(self, output_file, params, format='json', timezone_offset=None, add_gzip_header=False,\n compress=False, request_per_day=False, raw_stream=False, buffer_size=1024):\n # Increase timeout to 20 minutes if it's still set to default, 
/export requests can take a long time\n timeout_backup = self.timeout\n if self.timeout == 120:\n self.timeout = 1200\n\n request_count = 0\n if request_per_day:\n date_format = '%Y-%m-%d'\n f = datetime.datetime.strptime(params['from_date'], date_format)\n t = datetime.datetime.strptime(params['to_date'], date_format)\n delta = t - f\n request_count = delta.days\n\n for x in range(request_count + 1):\n params_copy = deepcopy(params)\n current_file = output_file\n\n if request_per_day:\n d = time.strptime(params['from_date'], date_format)\n current_day = (datetime.date(d.tm_year, d.tm_mon, d.tm_mday) + datetime.timedelta(x)).strftime(\n date_format)\n file_components = output_file.split('.')\n current_file = file_components[0] + \"_\" + current_day\n if len(file_components) > 1:\n current_file = current_file + '.' + file_components[1]\n params_copy['from_date'] = current_day\n params_copy['to_date'] = current_day\n\n events = self.query_export(params_copy, add_gzip_header=add_gzip_header, raw_stream=raw_stream)\n\n if raw_stream:\n if add_gzip_header and current_file[-3:] != '.gz':\n current_file = current_file + '.gz'\n with open(current_file, 'wb') as fp:\n shutil.copyfileobj(events, fp, buffer_size)\n else:\n if timezone_offset is not None:\n # Convert timezone_offset from hours to seconds\n timezone_offset = timezone_offset * 3600\n for event in events:\n event['properties']['time'] = int(event['properties']['time'] - timezone_offset)\n\n Mixpanel.export_data(events, current_file, format=format, compress=compress)\n\n # If we modified the default timeout above, restore default setting\n if timeout_backup == 120:\n self.timeout = timeout_backup", "def test_csv_writing_of_all_engagement_details(\n engagement_started_details_to_write_from: dict,\n engagement_ended_details_to_write_from: dict,\n expected_csv_output_for_started: str,\n expected_csv_output_for_ended: str,\n) -> None:\n\n assert (\n convert_person_and_engagement_data_to_csv(\n engagement_started_details_to_write_from, started=True\n )\n == expected_csv_output_for_started\n )\n assert (\n convert_person_and_engagement_data_to_csv(\n engagement_ended_details_to_write_from, ended=True\n )\n == expected_csv_output_for_ended\n )", "def grab_dates(self, start_date, stop_date):\n if isinstance(start_date, datetime.datetime):\n start_date = start_date.date()\n if isinstance(stop_date, datetime.datetime):\n stop_date = stop_date.date()\n\n if not self.quiet:\n print(f'Grabbing data for dates '\n f'{start_date.strftime(self.date_format)} through {stop_date.strftime(self.date_format)}')\n t0 = datetime.datetime.now()\n data = pd.DataFrame()\n date_range = [dt.date() for dt in pd.date_range(start_date, stop_date).to_pydatetime()]\n for date in date_range:\n date_str = date.strftime(self.date_format)\n file_name = f'{self.file_prefix} {date_str}.csv'\n file_path = Path(self.log_drive, file_name)\n try:\n new_data = pd.read_csv(file_path,\n header=0,\n parse_dates={'datetime': ['date', 'time']},\n index_col='datetime',\n infer_datetime_format=True)\n new_data.index = pd.to_datetime(new_data.index, format=self.datetime_format)\n data = data.append(new_data)\n except FileNotFoundError:\n print(f'File not found: {file_path}')\n if not self.quiet:\n tf = datetime.datetime.now()\n dt = (tf-t0).total_seconds()\n print(f'Grabbed data for dates '\n f'{start_date.strftime(self.date_format)} through '\n f'{stop_date.strftime(self.date_format)}')\n print(f'Grabbing took {dt:.3f} s')\n return data", "def main():\r\n\r\n directory = 
'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n path = os.path.join(directory, 'dump_3')\r\n if not (os.path.exists(path)):\r\n os.mkdir(path)\r\n\r\n for date in range(1, 31):\r\n # date-month-year\r\n # file_name1 = path + '\\\\' + str(date) + '-8-2020' + '_file1.txt'\r\n\r\n # year-month-date\r\n # file_name1 = path + '\\\\' + '2020-08-' + str(date) + '_file3.txt'\r\n\r\n # month_year_date\r\n file_name1 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file5.txt'\r\n\r\n # date-month-year\r\n # file_name2 = path + '\\\\' + str(date) + '-8-2020' + '_file2.txt'\r\n\r\n # year-month-date\r\n # file_name2 = path + '\\\\' + '2020-08-' + str(date) + '_file4.txt'\r\n\r\n # month_year_date\r\n file_name2 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file6.txt'\r\n\r\n rows = []\r\n for row in range(100):\r\n string = 'asddfgfhgkhjghkweoriuywoipywbnxvnmznvnmbatr'\r\n rows.append(string)\r\n with open(file_name1, 'w') as f1, open(file_name2, 'w') as f2:\r\n f1.writelines(rows)\r\n f2.writelines(rows)", "def _make_files(self):\n if not self.path.is_dir():\n raise FileNotFoundError(f\"Path {self.path} does not exist.\")\n\n # Make the filepaths\n self.file_points = self.path / \"point.dat\"\n self.file_lines = self.path / \"line.dat\"\n self.file_cadastre = self.path / \"cadastre.dat\"\n self.file_portals = self.path / \"portals.dat\"\n\n with open(self.file_points, \"w\") as f:\n # 2 lines ignored\n header = datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n f.write(header)\n self.points_dfs = []\n with open(self.file_lines, \"w\") as f:\n # 5 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + 3 * \"Generated: \\n\"\n + \"Name,Section,source_group,x1,y1,z1,x2,y2,z2,width,vert. 
ext.,-,-,\"\n \"emission_rate[kg/h/km],-,-,-,-\\n\"\n )\n f.write(header)\n with open(self.file_cadastre, \"w\") as f:\n # 1 line ignored\n header = \"x,y,z,dx,dy,dz,emission_rate[kg/h],-,-,-,source_group\\n\"\n f.write(header)\n with open(self.file_portals, \"w\") as f:\n # 2 lines ignored\n header = (\n datetime.datetime.now().strftime(\"Generated: %d/%m/%Y %H:%M\\n\")\n + \"x1,y1,x2,y2,z0,z1,emission_rate[kg/h],-,-,-,source_group\\n\"\n )\n f.write(header)\n\n\n # File to save the source groups values\n self.file_source_groups = self.path / \"source_groups.json\"\n with open(self.file_source_groups, \"w\") as f:\n # reverse the dict (items become keys and vice versa)\n reversed_source_groups = {v: k for k, v in self.source_groups.items()}\n json.dump(reversed_source_groups, f, indent=2)", "def exportOrgs ( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n o = sqlQuery ( c, \"select * from Organizations;\" )\n for i in o:\n oL = sqlQuery ( c, \"select * from OrganizationLocations where orgID = '\"+i[0]+\"';\" )\n oER = sqlQuery ( c, \"select * from OrganizationExternalResources where orgID = '\"+i[0]+\"';\" )\n oTC = sqlQuery ( c, \"select * from OrganizationsToCrises where orgID = '\"+i[0]+\"';\" )\n pTO = sqlQuery ( c, \"select * from PeopleToOrganizations where orgID = '\"+i[0]+\"';\" )\n xml += openTagAtt ( \"Organization\", \"organizationIdent\", i[0])\n xml += openCloseTag ( \"Name\", i[1])\n xml += closeTagAtt ( \"Kind\", \"organizationKindIdent\", i[2])\n for j in oL :\n xml += openTag ( \"Location\" )\n xml += openCloseTag ( \"Locality\", j [ 1 ] )\n xml += openCloseTag ( \"Region\", j [ 2 ] )\n xml += openCloseTag ( \"Country\", j [ 3 ] )\n xml += closeTag ( \"Location\" )\n xml += openCloseTag (\"History\", i[3])\n xml += openTag ( \"ContactInfo\" )\n xml += openCloseTag (\"Telephone\", i[4])\n xml += openCloseTag (\"Fax\", i[5])\n xml += openCloseTag (\"Email\", i[6])\n xml += openTag (\"PostalAddress\")\n xml += openCloseTag (\"StreetAddress\", i[7])\n xml += openCloseTag ( \"Locality\", i[8])\n xml += openCloseTag ( \"Region\", i[9])\n xml += openCloseTag ( \"PostalCode\", i[10])\n xml += openCloseTag ( \"Country\", i[11])\n xml += closeTag ( \"PostalAddress\" )\n xml += closeTag ( \"ContactInfo\" )\n xml += openTag (\"ExternalResources\")\n for j in oER:\n xml += openCloseTag ( j[1], j[2])\n xml += closeTag (\"ExternalResources\")\n xml += openTag (\"RelatedCrises\")\n for j in oTC:\n xml += closeTagAtt (\"RelatedCrisis\", \"crisisIdent\", j[1])\n xml += closeTag (\"RelatedCrises\")\n xml += openTag (\"RelatedPersons\")\n for j in pTO:\n xml += closeTagAtt (\"RelatedPerson\", \"personIdent\", j[0])\n xml += closeTag (\"RelatedPersons\")\n xml += closeTag (\"Organization\")\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def main():\n var_type = sys.argv[1]\n start_yr = int(sys.argv[2])\n end_yr = int(sys.argv[3])\n print(var_type,start_yr,end_yr,dataFol)\n \n data = imd.get_data(var_type, start_yr, end_yr, fn_format='yearwise',file_dir=str(dataFol)) # Path must be str because imdlib needs str", "def get_between(self, start, end):\n now = datetime.now()\n now = datetime(now.year, now.month, now.day)\n \n assert isinstance(start, datetime), 'start need to be datetime instance'\n assert isinstance(end, datetime), 'end need to be datetime instance'\n assert start < end, 'start need to be less than end'\n assert end < now, 'end need to be less or equal than yesterday'\n assert start >= start_date, 'no data before 
\\\"2003-01-01\\\"'\n \n strftime = datetime.strftime\n self.db.DBFILE = \\\n strftime(start, date_str) + \"+\" + strftime(end, date_str)\n \n \n # write all the data in the file at once\n lst_dict = self._helper_get_between(start, end)\n self.db.save_iter(lst_dict)", "def loop_observations ( self, start_date, end_date, step=1, fmt=\"%Y-%m-%d\" ):\n\n start_date = datetime.datetime.strptime( start_date, fmt )\n end_date = datetime.datetime.strptime( end_date, fmt )\n if start_date < self.date[0]:\n print \"No observations until %s, starting from there\" % self.date[0]\n start_date = self.date[0]\n\n if end_date > self.date[-1]:\n print \"No observations after %s, stopping there\" % self.date[-1]\n end_date = self.date[-1]\n\n delta = datetime.timedelta ( days=step )\n this_date = start_date.date()\n end_date = end_date.date() + delta\n obs_dates = [ x.date() for x in self.date ]\n while this_date < end_date:\n if this_date in obs_dates:\n iloc = obs_dates.index ( this_date )\n have_obs = True\n the_data = self._data_pntr[iloc].ReadAsArray()\n try:\n the_mask = map ( *self.masks[iloc] )\n except:\n the_mask = self.get_mask ( iloc )\n the_emulator = self.emulator[ iloc ]\n the_sza = self.sza[ iloc ]\n the_saa = self.saa[ iloc ]\n the_vza = self.vza[ iloc ]\n the_vaa = self.vaa[ iloc ]\n the_fname = self._data_pntr[iloc].GetDescription()\n try:\n the_sensor = self.sensor[iloc]\n except:\n the_sensor = self.sensor\n try:\n the_spectrum = self.spectral[iloc]\n except:\n the_spectrum = self.spectral\n\n else:\n have_obs = False\n the_data = None\n the_mask = None\n the_emulator = None\n the_sza = None\n the_saa = None\n the_vza = None\n the_vaa = None\n the_fname = None\n the_spectrum = None\n the_sensor = None\n this_date += delta\n retval = namedtuple ( \"retval\", [\"have_obs\", \"sensor\", \"date\", \"image\", \"mask\", \"emulator\",\n \"sza\", \"saa\", \"vza\", \"vaa\", \"fname\", \"spectrum\"] )\n retvals = retval ( have_obs=have_obs, sensor=the_sensor, \n date=this_date - delta, image=the_data, mask=the_mask, emulator=the_emulator, sza=the_sza,\n saa=the_saa, vza=the_vza, vaa=the_vaa, fname=the_fname, spectrum=the_spectrum )\n yield retvals", "def generate_final_data(model_names):\n\n for model_name in model_names:\n print(\"Creating fina data for \" + model_name[0])\n\n final_data = {}\n brush_data = common.load_json(\"../steps/\" + model_name[0] + \"/brush_data.json\")\n diff_data = common.load_json(\"../steps/\" + model_name[0] + \"/diff_plot_data.json\")\n distance_data = common.load_json(\"../steps/\" + model_name[0] + \"/distance_data.json\")\n\n final_data[0] = {\n \"step_number\" : 0,\n \"valid\" : brush_data['0'][\"valid\"],\n \"brush_data\" : sanitize_brush_data(brush_data['0']),\n \"diff_data\" : null_diff_data(),\n \"distance_data\" : null_distance_data()\n }\n\n for step_idx in range(1, len(brush_data)):\n print(str(step_idx) + \" \",)\n final_data[step_idx] = {}\n final_data[step_idx][\"step_number\"] = step_idx\n final_data[step_idx][\"valid\"] = brush_data[str(step_idx)][\"valid\"]\n final_data[step_idx][\"brush_data\"] = sanitize_brush_data(brush_data[str(step_idx)])\n final_data[step_idx][\"diff_data\"] = get_diff_data_step(diff_data, step_idx - 1)\n final_data[step_idx][\"distance_data\"] = get_distance_data_step(distance_data, str(step_idx))\n\n common.save_json(final_data, \"../final_data/\" + model_name[0] + \"/final_data.json\", compressed=False)" ]
[ "0.6041613", "0.58948135", "0.58275783", "0.5822768", "0.5751428", "0.5720737", "0.5717822", "0.570631", "0.57046837", "0.5676997", "0.5632432", "0.56253904", "0.5612947", "0.5587226", "0.55808955", "0.5580337", "0.5570761", "0.55477357", "0.5539858", "0.5537866", "0.5495238", "0.5489108", "0.5483286", "0.54757816", "0.54675764", "0.54600513", "0.5441669", "0.5441373", "0.54263014", "0.542037", "0.5416668", "0.54101723", "0.54080516", "0.5402486", "0.5400999", "0.5395709", "0.53933954", "0.53704107", "0.53678", "0.5359193", "0.53563374", "0.53475946", "0.53451693", "0.5342532", "0.5341892", "0.53333783", "0.53314096", "0.5323987", "0.53149694", "0.5312566", "0.5301524", "0.52914405", "0.52858996", "0.5275293", "0.5275159", "0.5270695", "0.5269669", "0.52623063", "0.5257928", "0.5237211", "0.5229809", "0.5225194", "0.5222874", "0.5222831", "0.5220748", "0.52206826", "0.52184874", "0.5216579", "0.5216576", "0.5215494", "0.5210805", "0.52066773", "0.52056396", "0.5203543", "0.5197738", "0.51906204", "0.51832765", "0.5174881", "0.5169646", "0.51671225", "0.5166534", "0.51652503", "0.51647836", "0.51634467", "0.5159213", "0.5158404", "0.51575917", "0.5155087", "0.51446533", "0.5143746", "0.51386297", "0.51380676", "0.5133999", "0.5133028", "0.5129752", "0.51256245", "0.51209664", "0.5120775", "0.5119595", "0.51175374" ]
0.5659217
10
will create JSON output files if there are two events (for each threshold) in one time window. I.e., if there are two >10MeV >10pfu events as well as two >100MeV >1pfu events, will create files for all four events, but if there are three >100MeV >1pfu events, will only generate JSON files for the first two. Second events for different thresholds are written to separate files rather than together.
def two_in_one(obs_file,et,subevent): #in this function, the "original time window" talked about in the comments #refers to the start and end times that were input to create the file obs_file, #which will likely have been created using the database_extraction function #opening first output file created by operational_sep_quantities with open(obs_file, 'r') as o: out = js.load(o) #all events recorded in that output file ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity'] ['ongoing_events']) #creating lists for values from each event end_times = [] start_times = [] energy_thresholds = [] flux_thresholds = [] out_names = [] #appending values to lists for each event for i in range(len(ongoing_events)): start_times.append(parse(ongoing_events[i]['start_time'])) end_times.append(parse(ongoing_events[i]['end_time'])) energy_thresholds.append(ongoing_events[i]['energy_min']) flux_thresholds.append(float(ongoing_events[i]['threshold'])) #checking if there was a second event for each threshold for i in range(len(end_times)): end = end_times[i] #if the end time of an event for any threshold was a day before the last day #in the original time window given, will check if ONLY THAT THRESHOLD #had another event after the first one, using the end time of the first #event of that threshold as the new start time of the event window if end.date() < et.date(): print('end time to use as new start time: %s' %end) #figuring out which threshold this end time was for flux_thresh = int(flux_thresholds[i]) energy_thresh = int(energy_thresholds[i]) print('extracting second event for threshold ' + str(flux_thresh) + ' MeV ' + str(energy_thresh) + ' pfu') #new start time (2 days in advance bc the database_extraction function #makes the start time 2 days prior, so will cancel that out) st = end + timedelta(days=2) #thresholds in correct format thresholds = str(energy_thresh) + ',' + str(flux_thresh) print('thresholds: %s' %thresholds) #creating observation data for second event for thresholds given out_names.append(Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent, thresholds = thresholds, one_thresh = True)) #returns list of all new files created by this function return(out_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_event(st,et,instrument_chosen,subevent):\r\n print('checking for multiple events within given time window')\r\n \r\n #creating file for time window with first events for all thresholds\r\n out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)\r\n\r\n #creating files for all second events for all thresholds\r\n new_files = two_in_one(out_name,et,subevent)\r\n \r\n #creating files for any third events for all thresholds that had a second event\r\n for file in new_files:\r\n two_in_one(file,et,subevent) \r\n \r\n return", "def output_files(self):\n o = []\n if 'unweighted' in self.event_types:\n o.append(self.name + \"_unweighted_events.lhe.gz\")\n if 'weighted' in self.event_types:\n o.append(self.name + \"_events.lhe.gz\")\n return o", "def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n \"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")", "def get_metrics_files(project, MIN_DIFFERENCE):\n print(\"LOG: Starting with\", project)\n\n # Get the latest two metrics for this project which are MIN_DIFFERENCE days apart\n re_metrics = re.compile(r\"METRICS-\\d{4}-\\d{2}-\\d{2}.json\")\n all_metrics = []\n\n for filename in os.listdir(project):\n if re_metrics.match(filename):\n all_metrics.append(filename)\n\n all_metrics.sort()\n\n # Come back later when there are atleast two generated metrics files\n if len(all_metrics) < 2:\n return False, {}, {}\n\n current_metrics_json_file = all_metrics.pop()\n print(\"LOG: Current metrics json file\", current_metrics_json_file)\n\n # If the latest Metrics is older than MIN_DIFFERENCE, then don't generate report\n # This is possible in cases of repo turning private or moving out\n today_datestamp = datetime.datetime.now()\n latest_datestamp = datetime.datetime.strptime(current_metrics_json_file, \"METRICS-%Y-%m-%d.json\")\n datetime_delta = today_datestamp - latest_datestamp\n if datetime_delta.days > MIN_DIFFERENCE:\n print(\"Skipping report for\", project, \"Latest metrics file is older than MIN_DIFFERENCE\")\n return False, {}, {}\n\n previous_metrics_json_file = None\n previous_metrics_index_index = len(all_metrics) - 1\n while(previous_metrics_index_index 
>= 0):\n # Calculate difference between last two metrics\n d1 = datetime.datetime.strptime(current_metrics_json_file, \"METRICS-%Y-%m-%d.json\")\n d2 = datetime.datetime.strptime(all_metrics[previous_metrics_index_index], \"METRICS-%Y-%m-%d.json\")\n if (d1 - d2).days > MIN_DIFFERENCE:\n previous_metrics_json_file = all_metrics[previous_metrics_index_index]\n print(\"LOG: Previous metrics json\", previous_metrics_json_file)\n break\n else:\n previous_metrics_index_index -= 1\n\n # Metrics are not older than MIN_DIFFERENCE days\n if previous_metrics_json_file is None:\n return False, {}, {}\n\n return True, current_metrics_json_file, previous_metrics_json_file", "def publish_burst(burst, num_events_counter, fp):\n for event_dict in burst:\n json_str = json.dumps(event_dict)\n num_events_counter.value += 1\n fp.write(json_str + '\\n')", "def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")", "def export_json(self):\r\n export_timestamp = datetime.now().timestamp()\r\n\r\n # Application data\r\n data_dict = { # Dictionary to be exported to json file\r\n \"Monitor App\": {\r\n \"Export Timestamp\": export_timestamp,\r\n \"Servers\": list(self.get_servers()),\r\n \"Event Logs\": {\r\n server: {} for server in self.get_servers()\r\n }\r\n }\r\n }\r\n \r\n # Thread data\r\n for thread in self.get_all_threads():\r\n data_dict[\"Monitor App\"][\"Event Logs\"][thread.get_server_name()][thread.get_log_type()] = {\r\n \"Thread Start Timestamp\": thread.latest_start.timestamp(),\r\n \"Total Processed Events\": thread.get_total_processed_events(),\r\n \"Total Thread Failures\": thread.get_failure_total(),\r\n \"Event IDs\": { # Value built below\r\n # 1111: {\r\n # \"Total\": int,\r\n # \"Description\": str or None,\r\n # \"Timestamps\": [floats] or None\r\n # }\r\n }\r\n }\r\n event_ID_key = data_dict[\"Monitor App\"][\"Event Logs\"][thread.get_server_name()][thread.get_log_type()][\"Event IDs\"]\r\n try: # Build Event IDs dictionary value for data_dict\r\n for event_ID in thread.event_IDs:\r\n event_ID_key[event_ID] = {\r\n \"Total\": thread.get_total_event_occurrences(event_ID),\r\n \"Description\": thread.get_event_description(event_ID),\r\n \"Timestamps\": thread.get_event_occurrence_times(event_ID)\r\n }\r\n except KeyError as err:\r\n print(err)\r\n\r\n # Create log directory\r\n if not 
os.path.exists(os.path.join(\"windowseventmonitor\", \"eventlogs\")):\r\n os.mkdir(os.path.join(\"windowseventmonitor\", \"eventlogs\"))\r\n \r\n event_log_json_file = os.path.join(\"windowseventmonitor\", \"eventlogs\", f\"{export_timestamp}.json\")\r\n try: # Write to json\r\n with open(event_log_json_file, \"w\") as f:\r\n data = json.dumps(data_dict, indent = 4)\r\n f.write(data)\r\n print(\"Exported logs\")\r\n except PermissionError as err:\r\n print(err)", "def create_foders_files(events, eventpath):\n \n len_events = len(events)\n \n for i in range(0, len_events):\n if os.path.exists(os.path.join(eventpath, events[i]['event_id'])) == True:\n \n if raw_input('Folder for -- the requested Period (min/max) ' + \\\n 'and Magnitude (min/max) -- exists in your directory.' + '\\n\\n' + \\\n 'You could either close the program and try updating your ' + \\\n 'folder OR remove the tree, continue the program and download again.' + \\\n '\\n' + 'Do you want to continue? (Y/N)' + '\\n') == 'Y':\n print '-------------------------------------------------------------'\n shutil.rmtree(os.path.join(eventpath, events[i]['event_id']))\n \n else:\n print '------------------------------------------------'\n print 'So...you decided to update your folder...Ciao'\n print '------------------------------------------------'\n sys.exit()\n\n for i in range(0, len_events):\n try:\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'BH_RAW'))\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'Resp'))\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'info'))\n except Exception, e:\n pass\n \n for i in range(0, len_events):\n Report = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'report_st'), 'a+')\n Report.close()\n \n \n for i in range(0, len_events):\n Exception_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'exception'), 'a+')\n eventsID = events[i]['event_id']\n Exception_file.writelines('\\n' + eventsID + '\\n')\n \n Syn_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'station_event'), 'a+')\n Syn_file.close()\n \n if input['time_iris'] == 'Y':\n for i in range(0, len_events):\n time_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'iris_time'), 'a+')\n time_file.close()\n \n \n for i in range(0, len_events):\n quake_file = open(os.path.join(eventpath, events[i]['event_id'],\\\n 'info', 'quake'), 'a+')\n \n quake_file.writelines(repr(events[i]['datetime'].year).rjust(15)\\\n + repr(events[i]['datetime'].julday).rjust(15) \\\n + repr(events[i]['datetime'].month).rjust(15) \\\n + repr(events[i]['datetime'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['datetime'].hour).rjust(15)\\\n + repr(events[i]['datetime'].minute).rjust(15) + \\\n repr(events[i]['datetime'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')\n \n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % events[i]['latitude'])) + '%.5f' \\\n % events[i]['latitude'] + \\\n ' '*(15 - len('%.5f' % events[i]['longitude'])) + '%.5f' \\\n % events[i]['longitude'] + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % abs(events[i]['depth']))) + '%.5f' \\\n % abs(events[i]['depth']) + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % abs(events[i]['magnitude']))) + '%.5f' \\\n % abs(events[i]['magnitude']) + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len(events[i]['event_id'])) + \\\n events[i]['event_id'] + '-' + '\\n')\n \n quake_file.writelines(repr(events[i]['t1'].year).rjust(15)\\\n + 
repr(events[i]['t1'].julday).rjust(15) \\\n + repr(events[i]['t1'].month).rjust(15) \\\n + repr(events[i]['t1'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['t1'].hour).rjust(15)\\\n + repr(events[i]['t1'].minute).rjust(15) + \\\n repr(events[i]['t1'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')\n \n quake_file.writelines(repr(events[i]['t2'].year).rjust(15)\\\n + repr(events[i]['t2'].julday).rjust(15) \\\n + repr(events[i]['t2'].month).rjust(15) \\\n + repr(events[i]['t2'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['t2'].hour).rjust(15)\\\n + repr(events[i]['t2'].minute).rjust(15) + \\\n repr(events[i]['t2'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')", "def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))", "def generate_no_time_two_files():\n fname = {'stress': 'resources/simple_stress_no_time.json',\n 'strain': 'resources/simple_strain_no_time.json'}\n expected = [ # makes an array of two pif systems\n pif.System(\n properties=[\n pif.Property(name='stress',\n scalars=list(np.linspace(0, 100))\n )]),\n\n pif.System(\n properties=[\n pif.Property(name='strain',\n scalars=list(np.linspace(0, 1))\n )])\n 
]\n # dump the pifs into two seperate files\n with open(fname['stress'], 'w') as stress_file:\n pif.dump(expected[0], stress_file)\n with open(fname['strain'], 'w') as strain_file:\n pif.dump(expected[1], strain_file)\n\n return {\n 'file_names': fname,\n 'expected': {\n 'stress': expected[0],\n 'strain': expected[1]\n }\n }", "def write_logs():\n global log_queue\n global maxcount\n\n # Process the first set\n for count, msg in enumerate(log_queue):\n\n loginfo = {}\n print msg\n for entry in msg['log'].keys():\n\n loginfo[entry] = {}\n\n for key in msg['log'][entry].keys():\n loginfo[entry][key] = msg['log'][entry][key]\n\n with open(msg['info'], 'r') as f:\n metadata = json.load(f)\n\n try:\n metadata[msg['run']]\n\n except(KeyError):\n metadata[msg['run']] = {}\n\n if msg['cmd'] == 'write':\n for key in loginfo.keys():\n metadata[msg['run']][key] = loginfo[key]\n\n elif msg['cmd'] == 'reset':\n metadata[msg['run']] = {}\n\n with open(msg['info'], 'w') as f:\n f.write(json.dumps(metadata, indent=2, sort_keys=True))\n\n log_queue.remove(msg)\n\n if count > maxcount:\n break", "def get_result_json(filename, folder, score, threshold):\n assert(isinstance(filename, str))\n assert(isinstance(folder, str))\n assert(isinstance(score, float))\n assert(isinstance(threshold, float))\n \n return {\n 'filename': filename,\n 'folder': folder,\n 'score': score,\n 'decision': 1 if score <= threshold else 0\n }", "def _events_json(fname, overwrite=False):\n new_data = {\n \"sample\": {\"Description\": \"The event onset time in number of sampling points.\"},\n \"value\": {\n \"Description\": (\n \"The event code (also known as trigger code or event ID) \"\n \"associated with the event.\"\n )\n },\n \"trial_type\": {\"Description\": \"The type, category, or name of the event.\"},\n }\n\n # make sure to append any JSON fields added by the user\n fname = Path(fname)\n if fname.exists():\n orig_data = json.loads(\n fname.read_text(encoding=\"utf-8\"), object_pairs_hook=OrderedDict\n )\n new_data = {**orig_data, **new_data}\n\n _write_json(fname, new_data, overwrite)", "def historize_log_values(write_file_to: str, values: str):\n if path.isfile(write_file_to ):\n historic = read_json_file(write_file_to)\n historic += values\n unique = []\n [unique.append(elem) for elem in historic if elem not in unique]\n unique.sort(key=lambda event: datetime.strptime(event[\"eventTime\"], \"%Y-%m-%d %H:%M:%S\"))\n write_to_json_file(write_file_to, unique)\n else:\n write_to_json_file(write_file_to, values)", "def create_file(output_json):\n folder = \"data/\"\n filename = datetime.now().strftime(\"%d-%m-%Y\") + \"-moisture-read.json\"\n filepath = folder+filename\n\n # Create Local folder\n try:\n os.mkdir(folder)\n except OSError:\n pass\n #print(\"Directory already created or a failure occured on directory (%s)\" % folder)\n\n # Create Empty Json file if it doesnt exists\n if(Path(filepath)).exists():\n pass\n else:\n try:\n f = open(filepath, \"a\")\n f.write('{\\n\"moisture_iot_project\":[]\\n}')\n f.close()\n except Exception as e:\n print(\"Failure occured creating the JSON file (%s)\" % e)\n\n # Open Json file to append current structure\n with open(filepath) as outfile:\n data = json.load(outfile)\n\n # Get list with all dictionaries\n temp = data['moisture_iot_project']\n\n # Append current structure\n temp.append(output_json)\n\n # Reorganize List values and re-write to JSON file\n data['moisture_iot_project'] = temp\n write_json(data, filepath)", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n 
ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def to_multiple_jsons(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('multiple_jsons')\n else:\n self.output('multiple_jsons')", "def dump_to_json(fileinfos, out):\n jsonarray = json.dumps(fileinfos)\n json_filename = \"all_elements_used.json\"\n text_file = open(os.path.join(out,out_dir_name,json_filename), \"w\")\n text_file.write(jsonarray)\n text_file.close()\n stdout.write(\"... \"+json_filename+\" created\\n\")", "def event_json_to_csv(self, outfileName, data):\n event_raw = data.split('\\n')\n try:\n result = '\\nAPI ERROR! 
- ' + json.loads(event_raw[0])['error'] + '\\n'\n print result\n return\n except KeyError:\n pass\n\n '''remove the lost line, which is a newline'''\n event_raw.pop()\n\n event_list = []\n jsonfile = outfileName[:-4] + '.json'\n with open(jsonfile,'w') as j:\n j.write('[')\n i = 0\n event_count = len(event_raw)\n for event in event_raw:\n j.write(event)\n i += 1\n if i != event_count:\n j.write(',')\n else:\n j.write(']')\n event_json = json.loads(event)\n event_list.append(event_json)\n print 'JSON saved to ' + j.name\n j.close()\n\n subkeys = get_sub_keys(event_list)\n\n #open the file\n f = open(outfileName, 'w')\n writer = UnicodeWriter(f)\n\n #write the file header\n f.write(codecs.BOM_UTF8)\n\n #writer the top row\n header = [u'event']\n for key in subkeys:\n header.append(key)\n writer.writerow(header)\n\n #write all the data rows\n for event in event_list:\n line = []\n #get the event name\n try:\n line.append(event[u'event'])\n except KeyError:\n line.append(\"\")\n #get each property value\n for subkey in subkeys:\n try:\n line.append(unicode(event[u'properties'][subkey]))\n except KeyError:\n line.append(\"\")\n #write the line\n writer.writerow(line)\n\n print 'CSV saved to ' + f.name\n f.close()", "def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def annotations_to_json(eaf_dir, json_dir):\n for file in os.listdir(eaf_dir):\n if file.endswith(\".eaf\"):\n print(\"converting\", file, \"to json\")\n file_name = os.path.join(json_dir, file[:-4]) + \".json\"\n file = os.path.join(eaf_dir, file)\n file_elan = Elan.Eaf(file)\n\n # Get all the data under the engagement_tier tier\n annotation_data = file_elan.get_annotation_data_for_tier(\"engagement_tier\")\n labels_for_annotation = elan_annotation_to_binary(annotation_data)\n\n # Create a json file storing the dictionary of {\"timeslot1,timeslot2\": 0/1(engaged/disengaged)}\n j = json.dumps(labels_for_annotation)\n f = open(file_name, \"w\")\n f.write(j)\n f.close()", "def generate_expected_two_files():\n fname = {'stress': 'resources/simple_stress.json',\n 'strain': 'resources/simple_strain.json'}\n expected = [ # makes an array of two pif systems\n pif.System(\n properties=[\n pif.Property(name='stress',\n scalars=list(np.linspace(0, 100)),\n conditions=pif.Value(\n name='time',\n scalars=list(np.linspace(0, 100))))]),\n\n pif.System(\n properties=[\n pif.Property(name='strain',\n scalars=list(np.linspace(0, 1)),\n conditions=pif.Value(\n name='time',\n scalars=list(np.linspace(0, 100))))])\n ]\n # dump the pifs into two seperate files\n with open(fname['stress'], 'w') as stress_file:\n pif.dump(expected[0], stress_file)\n with open(fname['strain'], 'w') as strain_file:\n pif.dump(expected[1], strain_file)\n\n return {\n 'file_names': fname,\n 'expected': {\n 'stress': expected[0],\n 'strain': expected[1]\n }\n }", "def detection_algorithm(f_blacklist, f_seconds, f_spikes):\n blacklist = create_blacklist_dict()\n filtered_traces_user_dict = defaultdict(list)\n\n file_type = get_file_type(f_blacklist, f_seconds, f_spikes)\n\n 
inspection_interval = 60*5\n\n bucket_list = [1, 5, 10, 30, 60]\n traces_file_1 = open('final_files/user_packets_1_%s'%(file_type), 'w')\n traces_file_5 = open('final_files/user_packets_5_%s'%(file_type), 'w')\n traces_file_10 = open('final_files/user_packets_10_%s'%(file_type), 'w')\n traces_file_30 = open('final_files/user_packets_30_%s'%(file_type), 'w')\n traces_file_60 = open('final_files/user_packets_bucket_60_%s'%(file_type), 'w')\n packets_file = open('final_files/user_packets_true_false_%s'%(file_type), 'w') \n\n for user in users:\n devids = []\n for d in user.devices:\n devids.append(str(d.id))\n\n devs = {}\n for d in user.devices:\n devs[d.id] = d.platform\n\n for elem_id in devids:\n sql_userid = \"\"\"SELECT login FROM devices WHERE id =:d_id\"\"\"\n user_id = ses.execute(text(sql_userid).bindparams(d_id = elem_id)).fetchone()\n idt = user_id[0]\n\n print idt\n packets_file.write(str(idt)+'\\n')\n\n if idt != 'bowen.laptop':\n continue\n\n #list contains Traces -> timestamp, url\n http_traces_list, dns_traces_list = get_test_data(elem_id)\n print len(http_traces_list)\n print len(dns_traces_list)\n\n cont = 0\n packets_true = defaultdict(list)\n packets_false = defaultdict(list)\n for packet in http_traces_list:\n print cont\n packets_list = get_packets_in_interval(packet, http_traces_list, inspection_interval)\n pkt_user_gen = filter_packet(packet, packets_list, blacklist, f_blacklist, f_seconds, f_spikes, packets_true, packets_false)\n packets_file.write(str(packet.timst) + ' ' + str(pkt_user_gen) + '\\n')\n if pkt_user_gen:\n filtered_traces_user_dict[idt].append(packet.timst)\n cont+=1\n\n packets_true = defaultdict(list)\n packets_false = defaultdict(list)\n for packet in dns_traces_list:\n packets_list = get_packets_in_interval(packet, dns_traces_list, inspection_interval)\n pkt_user_gen = filter_packet(packet, packets_list, blacklist, f_blacklist, f_seconds, f_spikes, packets_true, packets_false)\n packets_file.write(str(packet.timst) + ' ' + str(pkt_user_gen) + '\\n')\n if pkt_user_gen:\n filtered_traces_user_dict[idt].append(packet.timst)\n\n for bucket in bucket_list:\n print bucket\n traces_bucket = []\n traces_bucket = get_interval_list_predefined_gap(sorted(filtered_traces_user_dict[idt]), bucket)\n if bucket == 1:\n traces_file_1.write(idt + '\\n')\n elif bucket == 5:\n traces_file_5.write(idt + '\\n')\n elif bucket == 10:\n traces_file_10.write(idt + '\\n')\n elif bucket == 30:\n traces_file_30.write(idt + '\\n')\n elif bucket == 60:\n traces_file_60.write(idt + '\\n')\n\n print len(traces_bucket)\n for timst in traces_bucket:\n if bucket == 1:\n traces_file_1.write(str(timst) + '\\n')\n elif bucket == 5:\n traces_file_5.write(str(timst) + '\\n')\n elif bucket == 10:\n traces_file_10.write(str(timst) + '\\n')\n elif bucket == 30:\n traces_file_30.write(str(timst) + '\\n')\n elif bucket == 60:\n traces_file_60.write(str(timst) + '\\n')\n\n traces_file_1.close()\n traces_file_5.close()\n traces_file_10.close()\n traces_file_30.close()\n traces_file_60.close()", "def export_string_events(self):\n\n # ask user observations to analyze\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=True,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n fn = QFileDialog(self).getSaveFileName(self, \"Export 
events as strings\", \"\",\n \"Events file (*.txt *.tsv);;All files (*)\")\n fileName = fn[0] if type(fn) is tuple else fn\n\n if fileName:\n\n response = dialog.MessageDialog(programName, \"Include observation(s) information?\", [YES, NO])\n\n try:\n with open(fileName, \"w\", encoding=\"utf-8\") as outFile:\n for obsId in selectedObservations:\n # observation id\n outFile.write(\"\\n# observation id: {}\\n\".format(obsId))\n # observation descrition\n outFile.write(\"# observation description: {}\\n\".format(\n self.pj[OBSERVATIONS][obsId][\"description\"].replace(os.linesep, \" \")))\n # media file name\n if self.pj[OBSERVATIONS][obsId][TYPE] in [MEDIA]:\n outFile.write(\"# Media file name: {0}{1}{1}\".format(\", \".join([os.path.basename(x)\n for x in\n self.pj[OBSERVATIONS]\n [obsId]\n [FILE][PLAYER1]]),\n os.linesep))\n if self.pj[OBSERVATIONS][obsId][TYPE] in [LIVE]:\n outFile.write(\"# Live observation{0}{0}\".format(os.linesep))\n\n # independent variables\n if \"independent_variables\" in self.pj[OBSERVATIONS][obsId]:\n outFile.write(\"# Independent variables\\n\")\n\n # rows.append([\"variable\", \"value\"])\n for variable in self.pj[OBSERVATIONS][obsId][\"independent_variables\"]:\n outFile.write(\"# {0}: {1}\\n\".format(variable,\n self.pj[OBSERVATIONS][obsId][\n \"independent_variables\"][variable]))\n outFile.write(\"\\n\")\n\n # selected subjects\n for subj in plot_parameters[\"selected subjects\"]:\n if subj:\n subj_str = \"\\n# {}:\\n\".format(subj)\n else:\n subj_str = \"\\n# No focal subject:\\n\"\n outFile.write(subj_str)\n\n out = self.create_behavioral_strings(obsId, subj, plot_parameters)\n if out:\n outFile.write(out + \"\\n\")\n\n except:\n logging.critical(sys.exc_info()[1])\n QMessageBox.critical(None, programName, str(sys.exc_info()[1]), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)", "def export_aggregated_events(self):\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out, not_paired_obs_list = \"\", []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n return\n\n parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=False,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not parameters[\"selected subjects\"] or not parameters[\"selected behaviors\"]:\n return\n\n # check for grouping results\n flag_group = True\n if len(selectedObservations) > 1:\n flag_group = dialog.MessageDialog(programName, \"Group events from selected observations in one file?\",\n [YES, NO]) == YES\n\n extended_file_formats = [\"Tab Separated Values (*.tsv)\",\n \"Comma Separated Values (*.csv)\",\n \"Open Document Spreadsheet ODS (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\",\n \"SDIS (*.sds)\",\n \"SQL dump file (*.sql)\"]\n\n if flag_group:\n file_formats = [\"tsv\", \"csv\", \"ods\", \"xlsx\", \"xls\", \"html\", \"sds\",\n \"sql\"] # must be in same 
order than extended_file_formats\n\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self,\n \"Export aggregated events\",\n \"\", \";;\".join(extended_file_formats))\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Export aggregated events\", \"\",\n \";;\".join(extended_file_formats))\n\n if not fileName:\n return\n\n outputFormat = file_formats[extended_file_formats.index(filter_)]\n if pathlib.Path(fileName).suffix != \".\" + outputFormat:\n fileName = str(pathlib.Path(fileName)) + \".\" + outputFormat\n\n else: # not grouping\n\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma Separated values (*.csv)\",\n \"Open Document Spreadsheet (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\")\n item, ok = QInputDialog.getItem(self, \"Export events format\", \"Available formats\", items, 0, False)\n if not ok:\n return\n outputFormat = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n exportDir = QFileDialog(self).getExistingDirectory(self, \"Choose a directory to export events\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if outputFormat == \"sql\":\n _, _, conn = db_functions.load_aggregated_events_in_db(self.pj,\n parameters[\"selected subjects\"],\n selectedObservations,\n parameters[\"selected behaviors\"])\n try:\n with open(fileName, \"w\") as f:\n for line in conn.iterdump():\n f.write(\"{}\\n\".format(line))\n except:\n errorMsg = sys.exc_info()[1]\n logging.critical(errorMsg)\n QMessageBox.critical(None, programName, str(errorMsg), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n return\n\n data_header = tablib.Dataset()\n data_header.title = \"Aggregated events\"\n header = [\"Observation id\", \"Observation date\", \"Media file\", \"Total length\", \"FPS\"]\n if INDEPENDENT_VARIABLES in self.pj:\n for idx in sorted_keys(self.pj[INDEPENDENT_VARIABLES]):\n header.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n header.extend([\"Subject\", \"Behavior\"])\n header.extend([\"Modifiers\"])\n header.extend([\"Behavior type\", \"Start (s)\", \"Stop (s)\", \"Duration (s)\", \"Comment start\", \"Comment stop\"])\n data_header.append(header)\n\n data = copy.deepcopy(data_header)\n for obsId in selectedObservations:\n d = export_observation.export_aggregated_events(self.pj, parameters, obsId)\n data.extend(d)\n\n if not flag_group:\n fileName = str(\n pathlib.Path(pathlib.Path(exportDir) / safeFileName(obsId)).with_suffix(\".\" + outputFormat))\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n data = copy.deepcopy(data_header)\n\n if outputFormat == \"sds\": # SDIS format\n out = \"% SDIS file created by eMOC (www.eMOC.unito.it) at {}\\nTimed <seconds>;\\n\".format(\n datetime_iso8601())\n for obsId in selectedObservations:\n # observation id\n out += \"\\n<{}>\\n\".format(obsId)\n dataList = list(data[1:])\n for event in sorted(dataList, key=lambda x: x[-4]): # sort events by start time\n if event[0] == obsId:\n behavior = event[-7]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n behavior = behavior.replace(char, \"_\")\n subject = event[-8]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n subject = subject.replace(char, \"_\")\n event_start = \"{0:.3f}\".format(\n 
round(event[-4], 3)) # start event (from end for independent variables)\n if not event[-3]: # stop event (from end)\n event_stop = \"{0:.3f}\".format(round(event[-4] + 0.001, 3))\n else:\n event_stop = \"{0:.3f}\".format(round(event[-3], 3))\n out += \"{subject}_{behavior},{start}-{stop} \".format(subject=subject, behavior=behavior,\n start=event_start, stop=event_stop)\n out += \"/\\n\\n\"\n with open(fileName, \"wb\") as f:\n f.write(str.encode(out))\n return\n\n if flag_group:\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def output_inversions(folder,threshold):\n \n start_stop_matcher = re.compile(\"(.*):(.*)-(.*)\")\n common_inversions = []\n abs_alt = defaultdict(dict)\n abs_ref = defaultdict(dict)\n perc_alt = defaultdict(dict)\n\n abs_alt,abs_ref,perc_alt,perc_ref,common_inversions = parse_geno_file(folder,True) ## Call the parser, the returned objects are dictionary of dictionaries\n\n FILE_HANDLE = open('output_inversions_'+str(threshold)+\".tsv\",'w')\n output_write = \"\\t\".join(common_inversions)\n print >> FILE_HANDLE,\"Strain\"+\"\\t\"+output_write\n\n for strain in abs_alt.keys():\n for inversion in common_inversions:\n #if(perc_alt[strain][inversion] > threshold):\n match = re.match(start_stop_matcher,inversion)\n #print match.group(1)\n start = int(match.group(2).replace(',',''))\n stop = int(match.group(3).replace(',',''))\n length = stop-start\n print >> FILE_HANDLE,strain+\"\\t\"+str(length)+\"\\t\"+str(perc_alt[strain][inversion])+\"\\t\"+str(perc_ref[strain][inversion])+\"\\t\"+str(abs_alt[strain][inversion])+\"\\t\"+str(abs_ref[strain][inversion])\n\n FILE_HANDLE.close()", "def filter_events(self):\n events_by_b = []\n events_by_npart = []\n\n bfiles = [f for f in glob.glob(self._path+\"/*.b\") if os.path.isfile(f)]\n npfiles = [f for f in glob.glob(self._path+\"/*.npart\") if os.path.isfile(f)]\n\n if bfiles:\n print \"Found a .b file, doing impact parameter filtering.\"\n self.sort_by_bfile(bfiles, events_by_b)\n if npfiles:\n print \"Found a .npart file, doing participant number filtering.\"\n self.sort_by_npartfile(npfiles, events_by_npart)\n\n if not bfiles and not npfiles:\n self.sort_by_logfolder(events_by_b, events_by_npart)\n\n # Return the appropriate list of events\n if events_by_b:\n print len(events_by_b), \"data files remain after filtering.\"\n return events_by_b\n elif events_by_npart:\n print len(events_by_npart), \"data files remain after filtering.\"\n return events_by_npart\n else:\n print \"filter_events: None of the events fulfill the required criteria:\"\n print \"b range:\", self._bmin, self._bmax, \"Npart range:\", self._npmin, self._npmax", "def makeWeights(_files,treeName,category,_outputFile, BINS, PT, ETA):\n\tROOT.gROOT.SetBatch(1)\n\n\t#treeName = 'histoMuFromTk/fitter_tree'\n\t_trees = dict( [ ( name, _file.Get(treeName) ) for name,_file in _files.iteritems()] )\n\t#Check if in both files are the tree\n\tfor _tree in _trees.itervalues():\n\t\tif not _tree:\n\t\t\treturn None\n\t\n\thistos = {}\n\tweights = {}\n\n\t#-- The ':' token in A:B read as 'B conditioned to A' (look this unregular order)\n\t#-- The categories are datamembers which can be 1 or 0, a condition;\n\t#-- if we want to weight the pt-distribution of all probes for the L1Mu3 trigger\n\t#-- category, we must decided with respect which muonID category (Glb, TMLSAT, ...), then\n\t#-- reduce to a subset which the muonID 
category == 1 and calculate the weight of the\n\t#-- pt-distribution\n\t#-- The category variable can be A:B:C:..., the last one is the only one which we don't \n\t#-- want to reduce (see find category)\n\tcondCategory = ''\n\tstoreCategory = 'weight'\n\tif category.find(':') != -1:\n\t\t_catList = category.split(':')\n\t\t#-- This for is to include the quality cuts and other possible categories\n\t\tfor i in xrange(len(_catList)-1):\n\t\t\tcondCategory += ' && '+_catList[i]+' == 1 '# BUG------> && '+triggerCat+' == 1' \n\t\t\tstoreCategory += '_'+_catList[i]\n\n\tinstName = lambda k,pt : PT+'>>h_'+category+name+str(k)+'(50,'+str(pt[0])+','+str(pt[1])+')'\n\tcuts = lambda pt,eta: PT+' >= '+str(pt[0])+' && '+PT+' <'+str(pt[1])+\\\n\t\t\t' && '+ETA+' >= '+str(eta[0])+' && '+ETA+' < '+str(eta[1])+condCategory\n\t#print cuts #--------------------------> PROVISONAL: PARECE QUE SE RECUPERAN LOS ESPECTROS DE LOS PASSING\n\t #--------------------------> NO DE LOS ALL\n\tk = 0\n\tfor i in xrange(len(BINS.__getattribute__(PT))-1):\n\t\tpt = (BINS.__getattribute__(PT)[i],BINS.__getattribute__(PT)[i+1])\n\t\tfor j in xrange(len(BINS.__getattribute__(ETA))-1):\n\t\t\teta = (BINS.__getattribute__(ETA)[j],BINS.__getattribute__(ETA)[j+1])\n\t\t\tfor name,_t in _trees.iteritems(): \n\t\t\t\tN = _t.Draw( instName(k,pt),cuts(pt,eta) )\n\t\t\t\thistos[name] = ROOT.gDirectory.Get('h_'+category+name+str(k))\n\t\t\tprint ' \\033[1;34mDoing bin'+str(k)+' '+PT+'=('+str(pt[0])+','+str(pt[1])+') '+ETA+'=('+str(eta[0])+','+str(eta[1])+')\\033[1;m'\n\t\t\tswap = histos['numerator'].Clone(category+'_bin'+str(k))\n\t\t\tdummy = swap.Divide(histos['denominator'])\n\t\t\tweights[category+'_bin'+str(k)] =( (eta[0],eta[1]), (pt[0],pt[1]), ROOT.gDirectory.Get(category+'_bin'+str(k)) )\n\t\t\t#Acura els limits\n\t\t\tweights[category+'_bin'+str(k)][2].GetXaxis().SetLimits( pt[0], pt[1] ) \n\t\t\t#weights[category+'_bin'+str(k)][2].SetNormFactor(1) \n\t\t\tk += 1\n\t_out = ROOT.TFile(_outputFile,'RECREATE')\n\tfor name,(etaBins,ptBins,histo) in weights.iteritems():\n\t\thisto.Write()\n\t_out.Close()\t\n\treturn weights", "def preProcess(self,filename,fileoutput):\t\n\tdata=[]\n\tval =set()\n\tfo = open(fileoutput, \"wb\")\n\twith open(filename) as data_file:\n \tfor tags in data_file:\n\t\t\tif \"timestamp\" not in tags: \n \t \t continue\n\t\t\tts = re.search('timestamp: (.+?)\\)', tags).group(1)\n\t\t\tval =set()\n\t\t\tval.update({tag for tag in tags.split() if tag.startswith(\"#\")})\n\t\t\t#print val\n\t\t\tif len(val) >1:\n\t\t\t\tself.maintainWindow(data,ts)\n\t\t\t\tdata.append((ts,val))\n\t\t\t\tself.createAdjList(val,\"add\")\n\t\t\t\tprint(\"***\")\n\t\t\telse:\n\t\t\t\tself.maintainWindow(data,ts)\n\t\t\t\tprint(\"@@@@\")\n\t\t\tresult = self.calculateRollingAverages() \n\t\t\tfo.write(result+\"\\n\")\n fo.close()\n data_file.close()", "def write_filter_spec(filters, filename):\n data = export_filters(filters)\n with open(filename, 'w') as fp:\n json.dump(data, fp, indent = 4)", "def on_task_output(self, task, config):\n series = {}\n movies = {}\n for entry in task.accepted:\n if all(field in entry for field in ['tvdb_id', 'series_name', 'series_season', 'series_episode']):\n eid = str(entry['tvdb_id'])\n sno = str(entry['series_season'])\n eno = entry['series_episode']\n show = series[eid] if eid in series else {'name': entry['series_name'], 'seasons': {}}\n if not sno in show['seasons']:\n show['seasons'][sno] = []\n if not eno in show['seasons'][sno]:\n show['seasons'][sno].append(eno)\n elif all(field in entry 
for field in ['imdb_id', 'movie_name']):\n movies[entry['imdb_id']] = entry['movie_name']\n if series:\n for eid, show in series.items():\n dest = os.path.join(config, 'series.watched.%s.json' % eid)\n data = {'name': show['name'], 'rating': 5}\n if os.path.exists(dest):\n with open(dest, 'r') as f:\n data = json.load(f)\n for season, episodes in show['seasons'].items():\n lst = data[season] if season in data else []\n data[season] = list(set(lst + episodes))\n text = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))\n with open(dest, 'w') as f:\n f.write(text)\n self.log.info('Added watched episodes to Uoccin')\n if movies:\n dest = os.path.join(config, 'movies.watched.json')\n data = {}\n if os.path.exists(dest):\n with open(dest, 'r') as f:\n data = json.load(f)\n n = 0\n for eid, name in movies.items():\n if not eid in data:\n data[eid] = {'name': name, 'rating': 5}\n n += 1\n if n > 0:\n text = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))\n with open(dest, 'w') as f:\n f.write(text)\n self.log.info('Added watched movies to Uoccin')", "def create_file_output(self, results):\n for key, value in results.table_output.items():\n name_timestamp = key.split('&')\n _name = name_timestamp[0]\n timestamp = name_timestamp[1]\n file_name = output_file_prefix + \"-\" + _name + \".csv\"\n if file_name not in self.file_creation_set:\n self._header_written = False\n self.file_creation_set.update([file_name])\n for row in value:\n with open(file_name, 'a+') as file_to_write:\n row.update({'Timestamp': timestamp})\n _keys = row.keys()\n file_output = csv.DictWriter(file_to_write, _keys)\n if not self._header_written:\n file_output.writeheader()\n self._header_written = True\n file_output.writerow(row)\n file_to_write.close()\n return results", "def write_trajectory_files(self,suffix='--filtered'):\n for downD in self.case.downstreamD:\n xi = downD * self.case.turbine.D\n inputs = self.case.get_outputs(self.method,downD)\n outputs = self.case.get_outputs(self.method,downD,suffix=suffix)\n print(outputs['trajectory_file'])\n df = pd.read_csv(inputs['trajectory_file'],\n header=None)\n # should have at least 3 columns\n # 0: time, 1: ywake, 2: zwake\n newdf = self.df.xs(xi, level='x').iloc[:self.Ntimes[downD]]\n assert (len(newdf) == len(df))\n notna = ~pd.isna(newdf['y'])\n print('updated',np.count_nonzero(notna),'/',len(newdf),'at x=',xi)\n df.loc[notna,1] = newdf.loc[notna,'y']\n df.loc[notna,2] = newdf.loc[notna,'z'] + self.case.turbine.zhub\n df.to_csv(outputs['trajectory_file'],\n header=None,index=None)", "def process(input_files):\n n_words_event = len(OUT_CONFIG['event']['fields'])\n n_words_hit = len(OUT_CONFIG[args.format]['fields'])\n # Initialising event\n event = -1\n G = Geometry(CONFIGURATION)\n H = HitManager()\n SLs = {}\n for iSL in config.SL_SHIFT.keys():\n SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL])\n # Defining which SLs should be plotted in which global view\n GLOBAL_VIEW_SLs = {\n 'xz': [SLs[0], SLs[2]],\n 'yz': [SLs[1], SLs[3]]\n }\n # Analyzing the hits in each event\n for file_path in input_files:\n # Reading input file line by line\n with open(file_path, 'r') as file_in:\n file_line_nr = 0\n for line in file_in:\n file_line_nr += 1\n if file_line_nr <= 1:\n continue\n hits_lst = []\n H.reset()\n words = line.strip().split()\n event = int(words[0])\n # Skipping event if it was not specified in command line\n if args.events is not None and event not in args.events:\n continue\n nhits = int(words[1])\n print('Event 
{0:<5d} # hits: {1:d}'.format(event, nhits))\n if args.glance:\n continue\n # Skipping event with too many hits (most likely a spark event that will take forever to process)\n if nhits > args.max_hits:\n continue\n # Extracting hit information\n for iHit in range(nhits):\n start = n_words_event + iHit*n_words_hit\n ww = words[start:start+n_words_hit]\n hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])])\n H.add_hits(hits_lst)\n # Removing hits with time outside the timebox region\n H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True)\n # Calculating local+global hit positions\n H.calc_pos(SLs)\n # Creating figures of the chambers\n figs = {}\n figs['sl'] = plot.book_chambers_figure(G)\n figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs)\n # Analyzing hits in each SL\n sl_fit_results = {}\n for iSL, sl in SLs.items():\n # print('- SL', iSL)\n hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer')\n if args.plot:\n # Drawing the left and right hits in local frame\n figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5,\n fill_color='red', fill_alpha=0.7, line_width=0)\n figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5,\n fill_color='blue', fill_alpha=0.7, line_width=0)\n # Performing track reconstruction in the local frame\n sl_fit_results[iSL] = []\n layer_groups = hits_sl.groupby('layer').groups\n n_layers = len(layer_groups)\n # Stopping if lass than 3 layers of hits\n if n_layers < config.NHITS_MIN_LOCAL:\n continue\n hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()]\n # Building the list of all possible hit combinations with 1 hit from each layer\n hits_layered = list(itertools.product(*hitid_layers))\n # Building more combinations using only either left or right position of each hit\n for hit_ids in hits_layered:\n # print('- -', hit_ids)\n posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values\n posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values\n posx_combs = list(itertools.product(*posx))\n # Fitting each combination\n fit_results_lr = []\n fit_range = (min(posz), max(posz))\n for iC, posx_comb in enumerate(posx_combs):\n pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range)\n chi2 = stats[0][0] / n_layers\n if chi2 < config.FIT_CHI2_MAX:\n a0, a1 = pfit\n fit_results_lr.append((chi2, hit_ids, pfit))\n # Keeping only the best fit result from the given set of physical hits\n fit_results_lr.sort(key=itemgetter(0))\n if fit_results_lr:\n sl_fit_results[iSL].append(fit_results_lr[0])\n # Sorting the fit results of a SL by Chi2\n sl_fit_results[iSL].sort(key=itemgetter(0))\n if sl_fit_results[iSL]:\n # Drawing fitted tracks\n posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32)\n for iR, res in enumerate(sl_fit_results[iSL][:5]):\n col = config.TRACK_COLORS[iR]\n posx = res[2](posz)\n figs['sl'][iSL].line(x=posx, y=posz,\n line_color=col, line_alpha=0.7, line_width=3)\n\n if args.plot:\n # Drawing the left and right hits in global frame\n for view, sls in GLOBAL_VIEW_SLs.items():\n sl_ids = [sl.id for sl in sls]\n hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)]\n figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]],\n fill_color='red', fill_alpha=0.7, line_width=0)\n figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]],\n fill_color='blue', fill_alpha=0.7, line_width=0)\n # 
Building 3D segments from the fit results in each SL\n posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32)\n for sl in sls:\n for iR, res in enumerate(sl_fit_results[sl.id][:5]):\n posx = res[2](posz)\n start = (posx[0], 0, posz[0])\n end = (posx[1], 0, posz[1])\n segL = Segment(start, end)\n segG = segL.fromSL(sl)\n segG.calc_vector()\n # Extending the global segment to the full height of the view\n start = segG.pointAtZ(plot.PLOT_RANGE['y'][0])\n end = segG.pointAtZ(plot.PLOT_RANGE['y'][1])\n # Getting XY coordinates of the global segment for the current view\n iX = COOR_ID[view[0]]\n posx = [start[iX], end[iX]]\n posy = [start[2], end[2]]\n # Drawing the segment\n col = config.TRACK_COLORS[sl.id]\n figs['global'][view].line(x=posx, y=posy,\n line_color=col, line_alpha=0.7, line_width=3)\n print(sl.id, iR, posx, posy)\n\n\n\n\n\n\n # Storing the figures to an HTML file\n if args.plot:\n plots = [[figs['sl'][l]] for l in [3, 1, 2, 0]]\n plots.append([figs['global'][v] for v in ['xz', 'yz']])\n bokeh.io.output_file(args.output.format(event), mode='cdn')\n bokeh.io.save(bokeh.layouts.layout(plots))", "def generate_expected_one_file():\n fname = 'resources/simple_data.json'\n\n stress = np.linspace(0, 100)\n stress_time = np.linspace(0, 100)\n strain = np.linspace(0, 100)\n strain_time = np.linspace(0, 100)\n expected = pif.System(\n subSystems=None,\n properties=[\n pif.Property(name='stress',\n scalars=list(stress),\n conditions=pif.Value(\n name='time',\n scalars=list(stress_time))),\n\n pif.Property(name='strain',\n scalars=list(strain),\n conditions=pif.Value(\n name='time',\n scalars=list(strain_time)))\n ])\n with open(fname, 'w') as data:\n pif.dump(expected, data)\n\n return {\n 'file_name': fname,\n 'expected': expected\n }", "def runEventCreation():\r\n config = CONFIG['steps']['EventCreation']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n\r\n min_window_size = ci['min_window_size']\r\n change_speed_by = ci['change_speed_by']\r\n speed_ratio = ci['train_zero_speed_ratio']\r\n datetime_limit = ci['datetime_limit']\r\n csv_name_prefix = ci['csv_name_prefix']\r\n input_bucket = ci['bucket']\r\n window_event_bucket = ci['window_event_bucket']\r\n window_events_file = ci['window_events_file']\r\n\r\n output_bucket = co['bucket']\r\n event_dir = co['event_dir']\r\n filename_include = co['filename_include']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n csv_files = get_files(input_bucket, boto_client,\r\n file_type='csv', prefix='filtered')\r\n csv_files = ['filtered/7016_2020-09-09.csv']\r\n create_window_event(files=csv_files,\r\n input_bucket=input_bucket,\r\n output_bucket=output_bucket,\r\n minio_client=minioClient,\r\n min_window_size=min_window_size,\r\n ouput_dir=event_dir,\r\n window_event_bucket=window_event_bucket,\r\n window_events_file=window_events_file,\r\n csv_name_prefix=csv_name_prefix,\r\n change_speed_by=change_speed_by,\r\n train_zero_speed_ratio=speed_ratio,\r\n datetime_limit=datetime_limit,\r\n filename_include=filename_include)", "def write_to_json(results, filename):\n resultsDict = []\n 
with open(filename, 'w') as results_file:\n for i, result in enumerate(results):\n resultsDict.append(\n {'datetime_utc': result.time_str,\n 'distance_au': result.distance,\n 'velocity_km_s': result.velocity,\n 'neo': {'designation': result._designation,\n 'name': result.neo.name,\n 'diameter_km': result.neo.diameter,\n 'potentially_hazardous': result.neo.hazardous}})\n if resultsDict[i]['neo']['name'] is None:\n resultsDict[i]['neo']['name'] = ''\n json.dump(resultsDict, results_file, indent=2)", "def gen_subevent_bools(p_10,p_100):\r\n #list of subevent booleans\r\n subevent_bools = []\r\n \r\n #extracting 10 MeV peak flux if it exists\r\n for j in range(len(p_10)):\r\n try:\r\n p10 = float(p_10[j])\r\n except ValueError:\r\n p10 = 'nan'\r\n \r\n #extracting 100 MeV peak flux if it exists\r\n try:\r\n p100 = float(p_100[j])\r\n except ValueError:\r\n p100 = 'nan'\r\n \r\n #checking if peak fluxes exist\r\n if str(p10) != 'nan' and str(p100) != 'nan':\r\n #if the peak fluxes both exist and >10 MeV is both below threshold,\r\n #subevent is true (only care about >10 bc of definition of subevent)\r\n if p10 < 10:\r\n subevent_bools.append(True)\r\n elif p10 > 10:\r\n subevent_bools.append(False)\r\n \r\n #if >10 MeV doesn't exist, subevent is true\r\n else:\r\n subevent_bools.append(True)\r\n \r\n return(subevent_bools)", "def vpn_common_files(data, treshold=0.7):\n part_one = {}\n part_two = {}\n\n for extension in data:\n for webpage in data[extension]:\n if webpage not in result:\n part_one[webpage] = {}\n part_two[webpage] = {}\n for file in data[extension][webpage]:\n if file not in part_one[webpage]:\n part_one[webpage][file] = [\n extension,\n ]\n else:\n part_one[webpage][file].append(extension)\n\n # Keep common files in part one\n\n for wp_key, wp_value in list(part_one.items()):\n for f_key, f_value in list(wp_value.items()):\n if len(f_value) < len(data) * treshold:\n # Its not common in as many vpns as we want\n part_two[wp_key][f_key] = part_one[wp_key].pop(f_key)\n\n return part_one, part_two", "def INPUT_Periods_file(input):\n \n global events\n \n tb = 3600\n ta = 3600\n \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n \n len_events = len(events)\n \n input_period = open(os.path.join(os.getcwd(), 'INPUT-Periods'), 'a+')\n\n for i in range(0, len_events):\n \n str_event = str(events[i]['datetime']-tb) + '_' + \\\n str(events[i]['datetime']+ta) + '_' + \\\n str(events[i]['magnitude'] - 0.01) + '_' + \\\n str(events[i]['magnitude'] + 0.01) + '\\n'\n input_period.writelines(str_event)\n \n input_period.close()\n \n print '************************************************************' \n print 'New INPUT-Periods file is generated in your folder.'\n print 'Now, you could run the program again based on your desired event :)' \n print '************************************************************'\n \n sys.exit()", "def test_log_filenames_multiple_date_in_past(self):\n time_lower = datetime.datetime.now() - datetime.timedelta(seconds=7210)\n time_upper = time_lower + datetime.timedelta(seconds=20)\n (tracks, statuses) = self.app.log_filenames(\n [self.track_path('silence.mp3')]*5,\n timestamp='2 hours ago'\n )\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = self.get_track_by_id(track.pk)\n 
track_objs.append(track_obj)\n self.assertGreaterEqual(track_obj['timestamp'], time_lower)\n self.assertLess(track_obj['timestamp'], time_upper)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])", "def generate_args(threshold):\n # EUWATCH\n return_periods = ['00005', '00025', '00050', '00100', '00250', '00500', '01000']\n\n for return_period in return_periods:\n print(\"EUWATCH\", return_period, threshold)\n yield {\n 'infile': \"data/tanzania_flood/EUWATCH/inun_dynRout_RP_{}_Tanzania/inun_dynRout_RP_{}_contour_Tanzania.tif\".format(return_period, return_period),\n 'tmpfile_1': \"data/tanzania_flood/EUWATCH_{}_mask-{}.tif\".format(return_period, threshold),\n 'tmpfile_2': \"data/tanzania_flood/EUWATCH_{}_vector_mask-{}.shp\".format(return_period, threshold),\n 'outfile': \"data/tanzania_flood/threshold_{}/EUWATCH_{}_mask-{}.shp\".format(threshold, return_period, threshold)\n }\n\n # GLOFRIS models\n models = ['GFDL-ESM2M', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'MIROC-ESM-CHEM', 'NorESM1-M']\n\n for model in models:\n for return_period in return_periods:\n print(model, return_period, threshold)\n yield {\n 'infile': \"data/tanzania_flood/{}/rcp6p0/2030-2069/inun_dynRout_RP_{}_bias_corr_masked_Tanzania/inun_dynRout_RP_{}_bias_corr_contour_Tanzania.tif\".format(model, return_period, return_period),\n 'tmpfile_1': \"data/tanzania_flood/{}_{}_mask-{}.tif\".format(model, return_period, threshold),\n 'tmpfile_2': \"data/tanzania_flood/{}_{}_vector_mask-{}.shp\".format(model, return_period, threshold),\n 'outfile': \"data/tanzania_flood/threshold_{}/{}_{}_mask-{}.shp\".format(threshold, model, return_period, threshold)\n }\n\n # Convert SSBN models\n ssbn_return_periods=['5', '10', '20', '50', '75', '100', '200', '250', '500', '1000']\n ssbnmodels = {\n \"TZ_fluvial_undefended\": \"FU\",\n \"TZ_pluvial_undefended\": \"PU\"\n }\n\n for model, abbr in ssbnmodels.items():\n for return_period in ssbn_return_periods:\n print(\"SSBN\", abbr, return_period, threshold)\n yield {\n 'infile': \"data/tanzania_flood/SSBN_flood_data/{}/TZ-{}-{}-1.tif\".format(model, abbr, return_period),\n 'tmpfile_1': \"data/tanzania_flood/SSBN_{}_{}_mask-{}.tif\".format(abbr, return_period, threshold),\n 'tmpfile_2': \"data/tanzania_flood/SSBN_{}_{}_vector_mask-{}.shp\".format(abbr, return_period, threshold),\n 'outfile': \"data/tanzania_flood/threshold_{}/SSBN_{}_{}_mask-{}.shp\".format(threshold, abbr, return_period, threshold)\n }", "def sort_by_logfolder(self, events_by_b, events_by_npart):\n jobid = -1\n impb = -1.0\n npart = -1\n\n # Information about b and npart is assumed to be\n # found in output log files, stored in \"log\" folder\n logpath = self._path+\"/log/\"\n if not os.path.isdir(logpath):\n print \"filter_events: Bad logfile path:\", logpath\n logfiles = glob.glob(logpath+\"*\"+self._logstring+\"*\")\n processed_files = []\n files_to_remove = []\n for logfile in logfiles:\n datafile = \"\"\n with open(logfile, 'r') as lf:\n for line in lf:\n words = line.split()\n # Each output should be generated by different random seed\n if \"rsd\" in words:\n jobid = words[1]\n datafile = self.outputname(jobid)\n # If seed is not unique, output has been overwritten\n # and information about b and npart has become\n # ambiguous. 
Mark such files for deletion.\n if datafile in processed_files:\n sys.stderr.write(\"Warning: Multiple log files for one output.\\n\")\n sys.stderr.write(str(datafile)+\" will be ignored.\\n\")\n files_to_remove.append(datafile)\n break\n else:\n processed_files.append(datafile)\n if \"bimp:\" in words:\n impb = float(words[1])\n if impb > self._bmin and impb < self._bmax:\n events_by_b.append(datafile)\n if \"participants:\" in words:\n npart = int(words[1])\n if npart > self._npmin and npart < self._npmax:\n events_by_npart.append(datafile)\n\n # Remove data files which were ambiguously determined\n # due to being referred by multiple log files\n for datafile in files_to_remove:\n if datafile in events_by_b:\n events_by_b.remove(datafile)\n if datafile in events_by_npart:\n events_by_npart.remove(datafile)", "def log_event_to_file(event):\n with open('eventlogs/{}.json'.format(time.time()), 'w') as event_write:\n event_write.write(json_dumpstring(event))\n pass", "def write_to_json(results, filename):\r\n dicts = []\r\n for row in results:\r\n print(row.neo)\r\n r = {'datetime_utc': datetime_to_str(row.time),\r\n 'distance_au': row.distance, 'velocity_km_s': row.velocity,\r\n 'designation': row._designation,\r\n 'neo': {'designation': row.neo.designation,\r\n 'name': row.neo.name, 'diameter_km': row.neo.diameter,\r\n 'potentially_hazardous': row.neo.hazardous}}\r\n dicts.append(r)\r\n\r\n with open(filename, 'w') as json_file:\r\n json.dump(dicts, json_file, indent=4, sort_keys=False)", "def dump_result_as_json(self, filename):\n import json\n with open(filename, \"w\") as out:\n data = {\"raport\": [x.to_dict() for x in self.events]}\n json.dump(data, out, indent=4, sort_keys=True)", "def obs_csv2json(input_file,output_file,example_path,instrument):\r\n\r\n obs_path = Path(cfg.obs_path)\r\n \r\n with open(example_path,'r') as e:\r\n example = js.load(e)\r\n \r\n #deleting unused categories\r\n del(example['sep_forecast_submission']['forecasts'])\r\n del(example['sep_forecast_submission']['triggers'][2])\r\n del(example['sep_forecast_submission']['triggers'][1])\r\n del(example['sep_forecast_submission']['triggers'][0])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])\r\n del(example['sep_forecast_submission']['contacts'])\r\n del(example['sep_forecast_submission']['model'])\r\n del(example['sep_forecast_submission']['issue_time'])\r\n \r\n example['sep_forecast_submission']['mode'] = 'observation'\r\n\r\n #json template for observations\r\n obs_json = example\r\n\r\n fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',\r\n 'peak_time','rise_time','end_time','duration','fluence>10',\r\n 'fluence>100')\r\n\r\n #extracting data from csv file\r\n with open(input_file,'r') as f:\r\n reader = csv.DictReader(f, fieldnames)\r\n out = js.dumps( [ row for row in reader ] )\r\n\r\n obs_data = js.loads(out)\r\n\r\n data={}\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['observatory']) = instrument\r\n\r\n #creating data for all energy levels forecast\r\n for j in range(1,len(obs_data)):\r\n data[j-1]=obs_data[j]\r\n\r\n #recording start and end times for all events\r\n for i in range(len(data)):\r\n data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['start_time'] = data[i]['start_time'].isoformat()\r\n data[i]['end_time'] = 
datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['end_time'] = data[i]['end_time'].isoformat()\r\n data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['peak_time'] = data[i]['peak_time'].isoformat()\r\n \r\n #recording observed values for all events\r\n if i > 0:\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events']).append({})\r\n\r\n event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'][i])\r\n \r\n #start and end times\r\n event['start_time']=data[i]['start_time']\r\n event['threshold'] = data[i]['flux_threshold']\r\n event['energy_min'] = float(data[i]['energy_threshold'][1:])\r\n event['energy_max'] = -1\r\n event['end_time']=data[i]['end_time']\r\n\r\n #peak values\r\n event['peak_intensity']=data[i]['intensity']\r\n event['peak_time'] = data[i]['peak_time']\r\n event['intensity_units']='pfu'\r\n \r\n #fluence values\r\n event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'},\r\n {'energy_min' : '100', 'fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'}]\r\n event['fluence'][0]['fluence']=data[i]['fluence>10']\r\n event['fluence'][1]['fluence']=data[i]['fluence>100']\r\n\r\n\r\n if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index\r\n (int(event['energy_min']))]:\r\n event['all_clear_boolean'] = 'false'\r\n\r\n else:\r\n event['all_clear_boolean'] = 'true'\r\n\r\n\r\n #building json file\r\n with open(obs_path / output_file, 'w') as s:\r\n js.dump(obs_json,s,indent=1)\r\n print('json file %s created' %output_file)\r\n \r\n return", "def write_to_files():\n\t# Create output files\n\toutput = [None, \\\n\t\t open(\"priority-1.txt\", \"w\"), \\\n\t\t open(\"priority-2.txt\", \"w\"), \\\n\t\t open(\"priority-3.txt\", \"w\"), \\\n\t\t open(\"priority-4.txt\", \"w\"), \\\n\t\t open(\"priority-5.txt\", \"w\"), ]\n\n\t# Loop over all fields and write them to the correct file\n\tfor field in sorted(reportlog.keys()):\n\t\tpriority = reportlog[field]['priority']\n\t\tlabel = reportlog[field]['label']\n\n\t\toutput[priority].write(\"intphas_%s\\t%s\\n\" % (field, label))\n\t\toutput[priority].flush()\n\n\t# Close files\n\tfor i in [1,2,3,4,5]:\n\t\toutput[i].close()", "def to_json_file(test_health_list: List[TestHealthInfo],\n output_path: pathlib.Path) -> None:\n test_health_dicts = _to_test_health_dicts(test_health_list)\n\n with open(output_path, 'w') as json_file:\n for test_health in test_health_dicts:\n json.dump(test_health, json_file, allow_nan=False)\n json_file.write('\\n')", "def cat_json(output_filename, input_filenames):\n\twith open(output_filename, \"w\") as outfile:\n\t\tfirst = True\n\t\tcounter = -1\n\t\tfor infile_name in input_filenames:\n\t\t\twith open(infile_name) as infile:\n\t\t\t\tif first:\n\t\t\t\t\toutfile.write('{')\n\t\t\t\t\tfirst = False\n\t\t\t\telse:\n\t\t\t\t\toutfile.write(',')\n\t\t\t\toutfile.write(mangle(infile.read(), counter))\n\t\t\t\tcounter -= 1\n\t\toutfile.write('}')", "def make_plot_for_different_thresholds(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n num_of_trials,\n seed_num=None,\n measurement_type=None,\n runtime=1440,\n max_threshold=None,\n):\n all_ambulance_patients_mean_times = []\n all_other_patients_mean_times = []\n all_total_mean_times = []\n if max_threshold == None:\n max_threshold = num_of_servers\n for threshold in range(1, max_threshold + 1):\n 
current_ambulance_patients_mean_times = []\n current_other_patients_mean_times = []\n current_total_mean_times = []\n for _ in range(num_of_trials):\n times = get_times_for_patients(\n lambda_2,\n lambda_1,\n mu,\n num_of_servers,\n threshold,\n seed_num,\n measurement_type,\n runtime,\n )\n current_ambulance_patients_mean_times.append(np.nanmean(times[0]))\n current_other_patients_mean_times.append(np.nanmean(times[1]))\n current_total_mean_times.append(np.nanmean(times[0] + times[1]))\n all_ambulance_patients_mean_times.append(\n np.nanmean(current_ambulance_patients_mean_times)\n )\n all_other_patients_mean_times.append(\n np.nanmean(current_other_patients_mean_times)\n )\n all_total_mean_times.append(np.nanmean(current_total_mean_times))\n\n x_axis = [thres for thres in range(1, max_threshold + 1)]\n x_axis_label, y_axis_label, title = get_plot_for_different_thresholds_labels(\n measurement_type\n )\n plt.figure(figsize=(23, 10))\n diff_threshold_plot = plt.plot(\n x_axis,\n all_ambulance_patients_mean_times,\n \"solid\",\n x_axis,\n all_other_patients_mean_times,\n \"solid\",\n x_axis,\n all_total_mean_times,\n \"solid\",\n )\n plt.title(title, fontsize=13, fontweight=\"bold\")\n plt.xlabel(x_axis_label, fontsize=13, fontweight=\"bold\")\n plt.ylabel(y_axis_label, fontsize=13, fontweight=\"bold\")\n plt.legend(\n [\"Ambulance Patients\", \"Other Patients\", \"All times\"], fontsize=\"x-large\"\n )\n\n return diff_threshold_plot", "def grouping(filename, outdir, minsog, maxsog):\n records = Records(Extractor.extract_records(filename))\n\n groups = records.group(minsog, maxsog)\n for key in groups:\n rw = RecordsWriter(groups[key])\n rw.write_to_dir(key + \".fasta\", outdir)", "def send_results_file_json(**kwargs):\n try:\n logging.debug(\"Opening json output file for writing\")\n with open(kwargs[\"output_file_json\"], \"w\") as file_json_open:\n logging.info(\n \"Writing to output json file: \" + kwargs[\"output_file_json\"]\n )\n file_json_open.write(kwargs[\"results_dataset_json\"])\n return True\n except IOError:\n logging.exception(\"Error writing results to json output file\")\n return False", "def _consolidate_events(self):\n for event_file in self._most_recent_event_files():\n with open(event_file, \"r\") as f:\n for line in f.readlines():\n record = json.loads(line)\n event = deserialize_event(record)\n self._events[event.name].append(event)\n for name in self._events.keys():\n self._events[name].sort(key=lambda x: x.timestamp)", "def dump_to_file(final_results):\n\t#Add prefix result\n\tif final_results[\"Results\"][\"Test passed\"] == True:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_PASSED.json\"\n\telse:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_FAILED.json\"\n\twith open(ouput_filepath, 'w') as fp:\n\t\tjson.dump(final_results, fp)\n\treturn ouput_filepath", "def generate_differ_times_one_file():\n fname = 'resources/differ_times.json'\n\n stress = np.linspace(0, 100)\n stress_time = np.linspace(0, rnd.randint(1, 100))\n strain = np.linspace(0, 100)\n strain_time = np.linspace(0, rnd.randint(1, 100)) # generates a random time interval\n expected = pif.System(\n subSystems=None,\n properties=[\n pif.Property(name='stress',\n scalars=list(stress),\n conditions=pif.Value(\n name='time',\n 
scalars=list(stress_time))),\n\n pif.Property(name='strain',\n scalars=list(strain),\n conditions=pif.Value(\n name='time',\n scalars=list(strain_time)))\n ])\n with open(fname, 'w') as data:\n pif.dump(expected, data)\n\n return fname", "def filter_data(self, json_data):\n\n\t\tdata = json_data['data']\n\t\tlocal_time_convertor = time_convertor.TimeConvertor()\n\n\n\t\tfor event_data in data:\n\t\t\t# go through each event and save data\n\n\t\t\t# first need to get data for all avalible sites\n\t\t\tevent_h2h_odds = []\n\t\t\tevent_site_names = []\n\t\t\tfor i, sites_data in enumerate(event_data['sites']):\n\t\t\t\tif len(sites_data['odds']['h2h']) > 2:\n\t\t\t\t\t# if more the 3 odds values (draw odds given) only take win loss odds\n\t\t\t\t\tevent_h2h_odds.append([sites_data['odds']['h2h'][0], sites_data['odds']['h2h'][1]])\n\t\t\t\telse:\n\t\t\t\t\tevent_h2h_odds.append(sites_data['odds']['h2h'])\n\t\t\t\tevent_site_names.append(sites_data['site_nice'])\n\t\t\t\n\t\t\t# append event data\n\t\t\tself.teams.append(event_data['teams'])\n\t\t\tself.h2h_odds.append(event_h2h_odds)\n\t\t\tself.betting_sites.append(event_site_names)\n\n\t\t\tlocal_time_convertor.convert_to_AEST(event_data['commence_time'])\n\t\t\tself.start_time['string format'].append(local_time_convertor.local_time_string)\n\t\t\tself.start_time['datetime format'].append(local_time_convertor.local_time)\n\n\t\t# debug helper code\n\t\t# print(self.teams)\n\t\t# print(self.betting_sites)\n\t\t# print(self.h2h_odds)", "def write_to_json(data):\n # Serializing json \n json_object = json.dumps(data, indent = 4) \n\n date = datetime.now()\n date_str = date.strftime(\"%y%m%d_%H%M\")\n \n # Writing to sample.json \n with open(\"./intraday_data/json/i_data_{0}_{1}.json\".format(date_str, int(time.time())), \"w\") as outfile: \n outfile.write(json_object)", "def vpn_specific_files(data, treshold=0.7):\n\n vpn_files = {}\n webpages = len(data)\n\n for webpage in data:\n for file in data[webpage]:\n for ext in data[webpage][file]:\n if ext not in vpn_files:\n vpn_files[ext] = {\"total\": webpages}\n else:\n if file not in vpn_files[ext]:\n vpn_files[ext][file] = 1\n else:\n vpn_files[ext][file] = vpn_files[ext][file] + 1\n\n result = {}\n\n for ext_key, ext_value in list(vpn_files.items()):\n result[ext_key] = []\n for f_key, f_value in list(vpn_files[ext_key].items()):\n if f_key == \"total\":\n continue\n if vpn_files[ext_key][f_key] >= vpn_files[ext_key][\"total\"] * treshold:\n result[ext_key].append(f_key)\n\n return result", "def create_json_report(output):\n # Initial work, just dump mia_metrics and dummy_metrics into a json structure\n return json.dumps(output, cls=NumpyArrayEncoder)", "def export_file(self):\n if self.args.keyfilter:\n self.filter_keys()\n if self.args.datafilter:\n self.filter_values()\n json.dump(self.outputdata, self.outfile, indent=self.args.indent)\n self.outfile.write('\\n')", "def write_pre(df, df_rain, df_runoff, df_sm, watershed):\n\n df_xy = pd.read_csv(os.path.join(os.path.dirname(__file__), 'Lucky_hills_input_data', 'WGEW_gauge_locations.csv'))\n\n avgsm = df_sm.sm5.mean()\n\n for preName in df.preName:\n\n print(f'create .pre file for event {preName}')\n\n df_ = df_rain.loc[df_rain.preName==preName, :]\n if len(df_) > 0:\n\n preOut = f'{preName}.pre'\n f = open(os.path.join('preFiles', preOut), 'w')\n f.write(f'! 
{len(df_.gauge.unique())} gauge(s)\\n\\n')\n\n for gauge in df_.gauge.unique():\n\n X = df_xy.loc[(df_xy.Gage==gauge) & (df_xy.Watershed==watershed), 'East'].values[0]\n Y = df_xy.loc[(df_xy.Gage==gauge) & (df_xy.Watershed==watershed), 'North'].values[0]\n\n SAT = df_sm[df_sm.preName==preName].sm5.values[0]\n if math.isnan(SAT):\n SAT = avgsm\n f.write(f'! Estimated Initial Soil Moisture at gauge {gauge}\\n')\n\n df_g = df_.loc[df_.gauge==gauge, ['elapsedTime', 'accDepth']]\n if len(df_g) > 0:\n if df_g.elapsedTime.min() != 0.0:\n df_g.loc[len(df_g), :]= [0,0]\n df_g.sort_values(by=['elapsedTime'], inplace=True)\n\n f.write(f'! Event Start at: {df_.timeStamp.min()} at GAUGE #{gauge}\\n')\n f.write(f'\\nBEGIN GAUGE {gauge}\\n')\n f.write(f'X = {X}\\n')\n f.write(f'Y = {Y}\\n')\n f.write(f'SAT = {SAT:.4f}\\n')\n f.write(f'N = {len(df_g)}\\n\\n')\n f.write('TIME DEPTH ! (mm)\\n')\n for minute, depth in zip(df_g.elapsedTime, df_g.accDepth):\n f.write(f'{minute:.2f} {depth:.4f}\\n')\n f.write('END\\n\\n')\n\n f.close()", "def clean_pstamp(args):\n params = Main_param(args)\n for i, filt in enumerate(params.filters):\n print \"Running filter\", filt\n if os.path.isdir(params.path + 'stamp_stats') is False:\n subprocess.call([\"mkdir\", params.path + 'stamp_stats'])\n # open image and seg map\n catalog = Table.read(params.cat_files[filt], format=\"ascii.basic\")\n hlr = catalog['A_IMAGE'][int(params.num)]\n hdu1 = pyfits.open(params.gal_files[filt])\n hdu2 = pyfits.open(params.seg_files[filt])\n im_dat = hdu1[0].data\n im_hdr = hdu1[0].header\n seg_dat = hdu2[0].data\n hdu1.close()\n hdu2.close()\n shape = im_dat.shape\n x0, y0 = shape[0] / 2, shape[1] / 2\n # classify pixels as belonging to image, other objects and background\n # using segmentation map\n im, bl, oth, oth_segs, check = div_pixels(seg_dat, params.num)\n # Some bright object is nearby, and its seg map overlaps with central object\n # manually set output values so it fails selection tests later\n if len(im) == 0:\n print \"Ignore object\"\n peak_val = 0\n min_dist = 0.\n avg_flux = 999.99\n snr = -10.\n info = [0, 0, 0, min_dist, avg_flux, peak_val, snr]\n np.savetxt(params.path + 'stamp_stats/' + params.num + '_' + filt + '.txt', info)\n new_im_name = params.path + filt + '_' + params.seg_id + '_' + params.num + '_gal.fits'\n pyfits.writeto(new_im_name, im_dat, im_hdr, clobber=True)\n continue\n # Objects seg map covers entire pstamp, no blank region\n # manually set output values so it fails selection tests later\n if (len(bl) <= 1):\n print \"Ignore object\"\n peak_val = 0\n min_dist = 0.\n avg_flux = 999.99\n snr = -10.\n info = [0, 0, 0, min_dist, avg_flux, peak_val, snr]\n np.savetxt(params.path + 'stamp_stats/' + params.num + '_' + filt + '.txt', info)\n new_im_name = params.path + filt + '_' + params.seg_id + '_' + params.num + '_gal.fits'\n pyfits.writeto(new_im_name, im_dat, im_hdr, clobber=True)\n continue\n peak_val = np.max([[im_dat[im[i][0]][im[i][1]]] for i in range(len(im))])\n bck_pixels = [im_dat[bl[i][0], bl[i][1]] for i in range(len(bl))]\n b_mean, b_std = get_stats(np.array(bck_pixels), str='Image Background')\n # No other object present\n if len(oth_segs) == 0:\n print \"No other object\"\n print len(bl)\n min_dist = 999.99\n pix_near_dist = [shape[0] / 2, shape[1] / 2]\n avg_flux = 0.\n snr = get_snr(im_dat, b_std**2, hlr)\n info = [b_mean, b_std, np.sum(im_dat), min_dist, avg_flux, peak_val, snr]\n print info\n np.savetxt(params.path + 'stamp_stats/' + params.num + '_' + filt + '.txt', info)\n new_im_name = 
params.path + filt + '_' + params.seg_id + '_' + params.num + '_gal.fits'\n pyfits.writeto(new_im_name, im_dat, im_hdr, clobber=True)\n continue\n new_im = im_dat.copy()\n min_dists = []\n pix_min_dists = []\n for oth_seg in oth_segs:\n print \"Other object detected with id \", oth_seg\n print 'MASKING: ', len(oth[oth_seg]), ' pixels out of ', seg_dat.size\n print \" Noise file used \", params.noise_file\n dist, pix = get_min_dist(x0, y0, np.array(oth[oth_seg]))\n noise_file = params.noise_file[filt]\n new_im = change_others(new_im, np.array(oth[oth_seg]), noise_file, b_std)\n min_dists.append(dist)\n pix_min_dists.append(pix)\n min_dist = np.min(min_dists)\n pix_near_dist = pix_min_dists[np.argmin(min_dists)]\n avg_flux = get_avg_around_pix(pix_near_dist[0], pix_near_dist[1], im_dat)\n snr = get_snr(new_im, b_std**2, hlr)\n info = [b_mean, b_std, np.sum(im_dat), min_dist, avg_flux, peak_val, snr]\n np.savetxt(params.path + 'stamp_stats/' + params.num + '_' + filt + '.txt', info)\n new_im_name = params.path + filt + '_' + params.seg_id + '_' + params.num + '_gal.fits'\n print 'CREATED NEW POSTAGE STAMP', new_im_name\n pyfits.writeto(new_im_name, new_im, im_hdr, clobber=True)", "def _events_tsv(events, durations, raw, fname, trial_type, overwrite=False):\n # Start by filling all data that we know into an ordered dictionary\n first_samp = raw.first_samp\n sfreq = raw.info[\"sfreq\"]\n events = events.copy()\n events[:, 0] -= first_samp\n\n # Onset column needs to be specified in seconds\n data = OrderedDict(\n [\n (\"onset\", events[:, 0] / sfreq),\n (\"duration\", durations),\n (\"trial_type\", None),\n (\"value\", events[:, 2]),\n (\"sample\", events[:, 0]),\n ]\n )\n\n # Now check if trial_type is specified or should be removed\n if trial_type:\n trial_type_map = {v: k for k, v in trial_type.items()}\n data[\"trial_type\"] = [trial_type_map.get(i, \"n/a\") for i in events[:, 2]]\n else:\n del data[\"trial_type\"]\n\n _write_tsv(fname, data, overwrite)", "def load_data(fout, dir_in, run):\n\n path = dir_in + '/' + run + '/kdst/'\n files_all = [path + f for f in os.listdir(path) \\\n if os.path.isfile( os.path.join(path, f) )]\n dst = load_dsts(files_all, \"DST\", \"Events\")\n time_run = dst.time.mean()\n\n # count number of number of unique entries\n unique_events = ~dst.event.duplicated()\n #unique_events = dst.event.nunique()\n nunique_events = dst.event.nunique()\n\n #print(nunique_events)\n\n num_of_S2s = np.size (unique_events)\n num_of_evts = np.count_nonzero(unique_events)\n\n print(num_of_evts)\n fout.write(f\"dst_entries {str(len(dst))}\\n\")\n fout.write(f\"time_run {time_run}\\n\")\n fout.write(f\"s2_tot {num_of_S2s}\\n\")\n fout.write(f\"evt_tot {num_of_evts}\\n\")\n\n # compute number of s1 and s2\n df = dst[~dst.time.duplicated()]\n tot_ev = df.event.nunique()\n s1_num = df.nS1.values\n s2_num = df.nS2.values\n fout.write(f\"num_of_ev_check {tot_ev}\\n\")\n\n s1_0 = np.count_nonzero(s1_num == 0)\n s1_1 = np.count_nonzero(s1_num == 1)\n s1_2 = np.count_nonzero(s1_num == 2)\n s1_3 = np.count_nonzero(s1_num == 3)\n s1_4 = np.count_nonzero(s1_num == 4)\n s1_5 = np.count_nonzero(s1_num == 5)\n s1_6 = np.count_nonzero(s1_num == 6)\n\n s2_0 = np.count_nonzero(s2_num == 0)\n s2_1 = np.count_nonzero(s2_num == 1)\n s2_2 = np.count_nonzero(s2_num == 2)\n s2_3 = np.count_nonzero(s2_num == 3)\n s2_4 = np.count_nonzero(s2_num == 4)\n s2_5 = np.count_nonzero(s2_num == 5)\n s2_6 = np.count_nonzero(s2_num == 6)\n s2_7 = np.count_nonzero(s2_num == 7)\n s2_8 = np.count_nonzero(s2_num == 8)\n\n 
fout.write(f'eff_0s1 {s1_0 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_0s1_u {error_eff(tot_ev, s1_0 /tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_1s1 {s1_1 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_1s1_u {error_eff(tot_ev, s1_1 /tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_2s1 {s1_2 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_2s1_u {error_eff(tot_ev, s1_2 /tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_3s1 {s1_3 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_3s1_u {error_eff(tot_ev, s1_3 /tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_4s1 {s1_4 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_4s1_u {error_eff(tot_ev, s1_4 /tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_5s1 {s1_5 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_5s1_u {error_eff(tot_ev, s1_5 /tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_6s1 {s1_6 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_6s1_u {error_eff(tot_ev, s1_6 /tot_ev)*100:.5f}\\n')\n\n# s2 eff\n fout.write(f'eff_0s2 {s2_0 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_0s2_u {error_eff(tot_ev, s2_0/tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_1s2 {s2_1 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_1s2_u {error_eff(tot_ev, s2_1/tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_2s2 {s2_2 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_2s2_u {error_eff(tot_ev, s2_2/tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_3s2 {s2_3 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_3s2_u {error_eff(tot_ev, s2_3/tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_4s2 {s2_4 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_4s2_u {error_eff(tot_ev, s2_4/tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_5s2 {s2_5 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_5s2_u {error_eff(tot_ev, s2_5/tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_6s2 {s2_6 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_6s2_u {error_eff(tot_ev, s2_6/tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_7s2 {s2_7 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_7s2_u {error_eff(tot_ev, s2_7/tot_ev)*100:.5f}\\n')\n\n fout.write(f'eff_8s2 {s2_8 /tot_ev*100:.5f}\\n')\n fout.write(f'eff_8s2_u {error_eff(tot_ev, s2_8/tot_ev)*100:.5f}\\n')\n\n\n return dst", "def WriteValuesToJSONFile(self, state, values):\n value_counters = {}\n max_post_size = config.CONFIG[\"BigQuery.max_file_post_size\"]\n for value in values:\n class_name = value.__class__.__name__\n output_tracker, created = self._GetTempOutputFileHandles(class_name)\n\n # If our output stream is getting huge we should flush everything now and\n # set up new output files. Only start checking when we are getting within\n # range of the limit because we need to flush the stream to check the\n # size. 
Start counting at 0 so we check each file the first time.\n value_counters[class_name] = value_counters.get(class_name, -1) + 1\n if not value_counters[class_name] % max_post_size // 1000:\n\n # Flush our temp gzip handle so we can stat it to see how big it is.\n output_tracker.gzip_filehandle.flush()\n if os.path.getsize(output_tracker.gzip_filehandle.name) > max_post_size:\n # Flush what we have and get new temp output handles.\n self.Flush(state)\n value_counters[class_name] = 0\n output_tracker, created = self._GetTempOutputFileHandles(class_name)\n\n if not output_tracker.schema:\n output_tracker.schema = self.RDFValueToBigQuerySchema(value)\n\n if created:\n # Omit the leading newline for the first entry in the file.\n self._WriteJSONValue(output_tracker.gzip_filehandle, value)\n else:\n self._WriteJSONValue(\n output_tracker.gzip_filehandle, value, delimiter=\"\\n\")\n\n for output_tracker in self.temp_output_trackers.values():\n output_tracker.gzip_filehandle.flush()", "def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])", "def fit_and_write_json(excel_file):\n print(excel_file)\n # These variables are used subsequently in the code\n x_data,y_data = read_data(excel_file)\n \n # Create a dictionary to store peaks for now\n data = {}\n \n height = []\n for i in range(169):\n peaks,_ = find_peaks(y_data[:,i],height=5000,distance=50)\n data[i] = np.array(peaks,dtype=int).tolist()\n \n # Currently the dictionary should look like {'1': 1, '2': 2, '3':2 ...} and so on\n peak_data = data\n \n # Iterating over all 13 X and 13 Ys \n for i in range(169):\n \n # If scipy.signal.find_peaks finds only one peak\n if len(peak_data[i]) == 1:\n gmodel = Model(gaussian)\n peak = x_data[peak_data[i][0],0]\n \n # Initialize appropriate singal from the peak data\n # center \"c1\" comes from the peak data itself\n c1 = peak\n a1 = y_data[peak_data[i][0],i]\n if peak <= 850:\n w1 = 20\n elif peak <= 900:\n w1 = 15\n else:\n w1 = 10\n\n # Fit using these initial estimates\n result = gmodel.fit(y_data[:,i], x=x_data[:,0],amp=a1,cen=c1,width=w1)\n y1 = gaussian(x_data,result.best_values['amp'],result.best_values['cen'],result.best_values['width'])\n new_dict = {'peak':1,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),\n 'y1':y1.tolist(),'mu1':result.best_values['cen']}\n \n elif len(peak_data[i]) == 2:\n # For two peaks\n peak1 = x_data[peak_data[i][0],0]\n peak2 = x_data[peak_data[i][1],0]\n \n c1 = peak1\n a1 = y_data[peak_data[i][0],i]\n c2 = peak2\n a2 = y_data[peak_data[i][1],i]\n if peak1<= 850:\n w1 = 20\n elif peak1 <= 900:\n w1 = 15\n else:\n w1 = 10\n \n if peak2<= 850:\n w2 = 20\n elif peak2 <= 900:\n w2 = 15\n else:\n w2 = 10\n\n # Fit two peaks\n gmodel = Model(gauss2)\n result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2)\n y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])\n y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])\n new_dict = {'peak':2,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),\n 
'y1':y1.tolist(),'y2':y2.tolist(),\n 'mu1':result.best_values['c1'],'mu2':result.best_values['c2']}\n \n else:\n peak1 = x_data[peak_data[i][0],0]\n peak2 = x_data[peak_data[i][1],0]\n peak3 = x_data[peak_data[i][2],0]\n \n c1 = peak1\n a1 = y_data[peak_data[i][0],i]\n c2 = peak2\n a2 = y_data[peak_data[i][1],i]\n c3 = peak3\n a3 = y_data[peak_data[i][2],i]\n \n if peak1<= 850:\n w1 = 20\n elif peak1 <= 900:\n w1 = 15\n else:\n w1 = 10\n \n if peak2<= 850:\n w2 = 20\n elif peak2 <= 900:\n w2 = 15\n else:\n w2 = 10\n \n if peak3<= 850:\n w3 = 20\n elif peak3 <= 900:\n w3 = 15\n else:\n w3 = 10 \n \n # Fit three peaks\n gmodel = Model(gauss3)\n result = gmodel.fit(y_data[:,i], x=x_data[:,0], a1 = a1,c1=c1,w1=w1,a2=a2,c2=c2,w2=w2,a3=a3,c3=c3,w3=w3)\n y1 = gaussian(x_data[:,0],result.best_values['a1'],result.best_values['c1'],result.best_values['w1'])\n y2 = gaussian(x_data[:,0],result.best_values['a2'],result.best_values['c2'],result.best_values['w2'])\n y3 = gaussian(x_data[:,0],result.best_values['a3'],result.best_values['c3'],result.best_values['w3'])\n new_dict = {'peak':3,'y':y_data[:,i].tolist(),'fit':result.best_fit.tolist(),'y1':y1.tolist(),\n 'y2':y2.tolist(),'y3':y3.tolist(),\n 'mu1':result.best_values['c1'],'mu2':result.best_values['c2'],\n 'mu3':result.best_values['c3']}\n peak_data[i] = new_dict\n \n \n # At this point all the fitting is completed\n # Write the data into a json file\n new_file_name = 'fitted_data/fitted_'+excel_file[5:]+'.json'\n with open(new_file_name, 'w') as outfile:\n ujson.dump(peak_data, outfile)", "def WriteReachedData(filename, page_to_reached_data):\n json_object = []\n for (offset, data) in page_to_reached_data.items():\n json_object.append({'offset': offset, 'total': data['total'],\n 'reached': data['reached']})\n with open(filename, 'w') as f:\n json.dump(json_object, f)", "def analyze_thresholds(datapath, threshold_lt1, threshold_lt2, normalize = True, save = 1):\n print 'analyzing thresholds...' 
\n current_dir = os.getcwd()\n os.chdir(datapath)\n files = os.listdir(datapath)\n\n for k in arange(len(files)):\n right_file = '.npz' in files[k]\n \n if right_file:\n data = numpy.load(datapath+'\\\\'+files[k])\n \n CR_cts_after_seq_lt1 = data['cr_hist_LT1_first']\n CR_cts_after_seq_lt2 = data['cr_hist_LT2_first']\n\n nr_of_counts = arange(len(CR_cts_after_seq_lt1))\n\n CR_cts_total_lt1 = data['cr_hist_LT1_total']\n CR_cts_total_lt2 = data['cr_hist_LT2_total']\n \n if normalize:\n CR_cts_after_seq_lt2 = CR_cts_after_seq_lt2/float(sum(CR_cts_after_seq_lt2))\n CR_cts_total_lt2 = CR_cts_total_lt2/float(sum(CR_cts_total_lt2))\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()*100\n \n CR_cts_after_seq_lt1 = CR_cts_after_seq_lt1/float(sum(CR_cts_after_seq_lt1))\n CR_cts_total_lt1 = CR_cts_total_lt1/float(sum(CR_cts_total_lt1))\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100\n else:\n times_passed_after_seq_lt2 = CR_cts_after_seq_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_after_seq_lt2.sum())*100\n times_passed_overall_lt2 = CR_cts_total_lt2[nr_of_counts>=threshold_lt2].sum()/float(CR_cts_total_lt2.sum())*100\n times_passed_after_seq_lt1 = CR_cts_after_seq_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_after_seq_lt1.sum())\n times_passed_overall_lt1 = CR_cts_total_lt1[nr_of_counts>=threshold_lt1].sum()*100/float(CR_cts_total_lt1.sum())\n\n\n #print 'After sequence: LT2 percentage passed = ',num2str(sum(times_passed_after_seq_lt2),1),'%'\n #print 'and LT1 percentage passed = ',num2str(sum(times_passed_after_seq_lt1),1),'%'\n\n Log = False\n\n figure6 = plt.figure(figsize=(16.0, 12.0))\n plt.subplot(223)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt2,1)+'%')\n else:\n plt.title('CR counts after sequence')\n plt.xlim(0,25)\n \n plt.subplot(224)\n plt.bar(nr_of_counts,CR_cts_total_lt2,log=Log, color = 'm')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT2: all CR checks, passed threshold: '+num2str(times_passed_overall_lt2,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,25)\n\n plt.subplot(221)\n plt.bar(nr_of_counts,CR_cts_after_seq_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: CR counts after sequence, passed threshold: '+num2str(times_passed_after_seq_lt1,1)+'%')\n else:\n plt.title('CR counts after sequence')\n plt.xlim(0,50)\n \n plt.subplot(222)\n plt.bar(nr_of_counts,CR_cts_total_lt1,log=Log, color = 'b')\n plt.xlabel('Number of counts')\n plt.ylabel('Fraction of occurrences')\n if normalize:\n plt.title('LT1: all CR checks, passed threshold: '+num2str(times_passed_overall_lt1,1)+'%')\n else:\n plt.title('CR counts for all CR checks')\n plt.xlim(0,50)\n \n if save:\n if normalize:\n figure6.savefig('CR_information_LT1_and_LT2_normalized.pdf')\n else:\n figure6.savefig('CR_information_LT1_and_LT2.pdf')\n\n\n return times_passed_overall_lt1, times_passed_after_seq_lt1, times_passed_overall_lt2, times_passed_after_seq_lt2", "def 
_generate_step_results(json_filename=\"jsonoutput.json\"):\n if debug:\n print('[DEBUG] Func: _generate_step_results...')\n\n testrail_format = {\"custom_step_results\": []} # Json format that TestRail requires\n step_counter = 1\n\n with open(json_filename, 'r') as json_file_obj:\n json_file_contents = json.loads(json_file_obj.read())\n json_file_obj.close()\n\n for feature in json_file_contents:\n for step in feature['elements'][0]['steps']:\n\n # Create \"content\": \"Step j\"\n test_step = {}\n\n test_step[\"content\"] = \"Step \" + str(step_counter)\n\n # Create \"expected\": \"foo\"\n test_step[\"name\"] = str(step['name'])\n test_step[\"expected\"] = \"passed\"\n\n # Create \"actual\": \"foo\"\n test_step[\"actual\"] = str(step['result']['status'])\n\n # Create \"status_id\": 1 pass, 4 retest\n test_step[\"status_id\"] = 1 if test_step[\"expected\"] == test_step[\"actual\"] else 4\n\n testrail_format['custom_step_results'].append(test_step)\n\n step_counter += 1\n\n return json.dumps(testrail_format[\"custom_step_results\"])", "def create_collection_report(output_dir, output_file_name, k8s_cli, namespaces, start_time, mode):\n\n with open(os.path.join(output_dir, 'collection_report.json'), \"w\") as output_fh:\n json.dump({\n \"output_file_name\": output_file_name,\n \"k8s_cli\": k8s_cli,\n \"namespaces\": namespaces,\n \"start_time\": start_time,\n \"mode\": mode,\n \"log_collector_version\": VERSION_LOG_COLLECTOR\n }, output_fh)", "def filter_json_file(filename, filters_list):\n filename = filename.replace(\" \", \"\")\n print(\"Filters List {}\".format(filters_list))\n if isinstance(filters_list, list) is False:\n filters_list = [filters_list]\n with open(robot_dir + \"/output/original/{}_orig.json\".format(filename), \"r\") as file_orig:\n # Get the root Json Object\n json_root_object = json.load(file_orig)\n\n if \"commitId\" in json_root_object:\n del json_root_object[\"commitId\"]\n\n # Store the original json object\n orig_json_object = json.dumps(json_root_object)\n root_key = json_root_object.keys()[0]\n\n if root_key == \"message\" and len(json_root_object.keys()) > 1:\n root_key = json_root_object.keys()[1]\n\n # Get the array of objects under the root\n try:\n if len(filters_list) > 0:\n json_root_object = delete_keys_from_dict(json_root_object, filters_list)\n except:\n print('Failed to delete keys from json_root_object before iteration')\n\n if root_key in ROOT_RETURN_ELEMENTS:\n json_array = json_root_object[root_key]\n\n if isinstance(json_array, list):\n # Clear the original array\n json_root_object[root_key] = []\n\n # For each object remove any of the keys that are specified in the filter list then put the object back into\n # the array\n for json_object in json_array:\n if len(filters_list) > 0:\n json_object = delete_keys_from_dict(json_object, filters_list)\n json_root_object[root_key].append(json_object)\n # json_root_object = sorted(json_root_object)\n elif isinstance(json_array, str):\n json_root_object = json_array\n\n # Serialize the JSON object before writing it to the file\n json_object = json.dumps(json_root_object, sort_keys=True, indent=4, separators=(',', ': '))\n # Write to result .json file:\n filtered_filename = robot_dir + \"/output/results/{}.json\".format(filename)\n filtered_file = open(filtered_filename, \"w\")\n # print(json_object)\n filtered_file.writelines(json_object)\n filtered_file.close()\n\n # return filtered_output, orig_output\n return json_object, orig_json_object", "def export_events(self, output_file, params, format='json', 
timezone_offset=None, add_gzip_header=False,\n compress=False, request_per_day=False, raw_stream=False, buffer_size=1024):\n # Increase timeout to 20 minutes if it's still set to default, /export requests can take a long time\n timeout_backup = self.timeout\n if self.timeout == 120:\n self.timeout = 1200\n\n request_count = 0\n if request_per_day:\n date_format = '%Y-%m-%d'\n f = datetime.datetime.strptime(params['from_date'], date_format)\n t = datetime.datetime.strptime(params['to_date'], date_format)\n delta = t - f\n request_count = delta.days\n\n for x in range(request_count + 1):\n params_copy = deepcopy(params)\n current_file = output_file\n\n if request_per_day:\n d = time.strptime(params['from_date'], date_format)\n current_day = (datetime.date(d.tm_year, d.tm_mon, d.tm_mday) + datetime.timedelta(x)).strftime(\n date_format)\n file_components = output_file.split('.')\n current_file = file_components[0] + \"_\" + current_day\n if len(file_components) > 1:\n current_file = current_file + '.' + file_components[1]\n params_copy['from_date'] = current_day\n params_copy['to_date'] = current_day\n\n events = self.query_export(params_copy, add_gzip_header=add_gzip_header, raw_stream=raw_stream)\n\n if raw_stream:\n if add_gzip_header and current_file[-3:] != '.gz':\n current_file = current_file + '.gz'\n with open(current_file, 'wb') as fp:\n shutil.copyfileobj(events, fp, buffer_size)\n else:\n if timezone_offset is not None:\n # Convert timezone_offset from hours to seconds\n timezone_offset = timezone_offset * 3600\n for event in events:\n event['properties']['time'] = int(event['properties']['time'] - timezone_offset)\n\n Mixpanel.export_data(events, current_file, format=format, compress=compress)\n\n # If we modified the default timeout above, restore default setting\n if timeout_backup == 120:\n self.timeout = timeout_backup", "def summarize_series(fglob, outfile):\n with open(outfile, mode='w') as of:\n #Iterate over files\n flist = glob(fglob)\n flist = sorted(flist)\n lgrho = [] #list of log(rho) values, parallel to the list of rxnmap maps\n rxns = [] #list of maps of form 'rxn_name' --> energy release [erg/g/s]\n for f in flist:\n rxnmap = {}\n currxn = ''\n eps_nuc = ''\n for line in open(f,mode='r'):\n if not currxn and line.count('reaction name') == 1:\n i1 = line.index('<') + 1\n i2 = line.index('>')\n currxn = line[i1:i2]\n elif currxn and line.count('eps_nuc') == 1:\n eps_nuc = float(line.partition(':')[2].strip())\n rxnmap[currxn] = eps_nuc\n currxn = ''\n elif line.count('log rho') == 1:\n lgrho.append(line.partition('rho')[2].strip())\n srtmap = sorted(rxnmap.items(), key=operator.itemgetter(1), reverse=True) #sort on values\n rxns.append(srtmap)\n\n #Write header\n of.write('log(rho): ' + (' {:3.3s} |'*len(lgrho)).format(*lgrho) + '\\n')\n\n #Write rows of data for each logrho, include top ten rxns\n start = ' '\n for i in range(10):\n of.write(start)\n for tup in rxns:\n of.write('{:23s}'.format(tup[i][0]))\n of.write('\\n')\n of.write(start)\n for tup in rxns:\n of.write('{:<23.8e}'.format(tup[i][1]))\n of.write('\\n\\n')", "def create_result_json(json_object, result_json_file):\n write_json_to_file(json_object, result_json_file)", "def test_json(test_data,tmp_path):\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(wildtype=d[\"wildtype\"],\n genotype=d[\"genotype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write json file\n json_file = os.path.join(tmp_path,\"tmp.json\")\n gpm.to_json(filename=json_file)\n assert 
os.path.isfile(json_file)\n\n # Read json file\n new_gpm = gpmap.read_json(filename=json_file)\n conftest.compare_gpmap(gpm,new_gpm)", "def write_benchmark_json(file_out, benchmark_result, benchmark, test_config = TestConfig()):\n json.dump(benchmark_result, file_out)", "def events_to_json(events):\n result = {}\n index = 0\n for e in events:\n event = {}\n event['id'] = e.id\n event['name'] = e.name\n event['datetime'] = e.datetime\n event['fee'] = e.fee\n event['max_capacity'] = e.max_capacity\n event['min_capacity'] = e.min_capacity\n result['event'+str(index)] = event\n index += 1\n return result", "def plot_events1_triggered(self):\n\n result, selectedObservations = self.selectObservations(SELECT1)\n\n if not selectedObservations:\n return\n\n if not self.pj[OBSERVATIONS][selectedObservations[0]][EVENTS]:\n QMessageBox.warning(self, programName, \"There are no events in the selected observation\")\n return\n\n for obsId in selectedObservations:\n totalMediaLength = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n if totalMediaLength == -1:\n totalMediaLength = 0\n\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations, totalMediaLength)\n\n totalMediaLength = int(totalMediaLength)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n o = {}\n\n for subject in plot_parameters[\"selected subjects\"]:\n\n o[subject] = {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n cursor.execute(\n \"SELECT occurence FROM events WHERE subject = ? AND code = ? AND modifiers = ? ORDER BY observation, occurence\",\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n if modifier[0]:\n behaviorOut = [behavior, modifier[0].replace(\"|\", \",\")]\n\n else:\n behaviorOut = [behavior]\n\n behaviorOut_json = json.dumps(behaviorOut)\n\n if not behaviorOut_json in o[subject]:\n o[subject][behaviorOut_json] = []\n\n for idx, row in enumerate(rows):\n if POINT in self.eventType(behavior).upper():\n o[subject][behaviorOut_json].append([row[0], row[0]]) # for point event start = end\n\n if STATE in self.eventType(behavior).upper():\n if idx % 2 == 0:\n try:\n o[subject][behaviorOut_json].append([row[0], rows[idx + 1][0]])\n except:\n if NO_FOCAL_SUBJECT in subject:\n sbj = \"\"\n else:\n sbj = \"for subject <b>{0}</b>\".format(subject)\n QMessageBox.critical(self, programName,\n \"The STATE behavior <b>{0}</b> is not paired {1}\".format(\n behaviorOut, sbj))\n else:\n cursor.execute(\n \"SELECT occurence FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n rows = list(cursor.fetchall())\n\n if not len(rows) and plot_parameters[\"exclude behaviors\"]:\n continue\n\n if STATE in self.eventType(behavior).upper() and len(rows) % 2:\n continue\n\n behaviorOut = [behavior]\n behaviorOut_json = json.dumps(behaviorOut)\n\n if not behaviorOut_json in o[subject]:\n o[subject][behaviorOut_json] = []\n\n for idx, row in enumerate(rows):\n if POINT in self.eventType(behavior).upper():\n o[subject][behaviorOut_json].append([row[0], row[0]]) # for point event start = end\n if STATE in self.eventType(behavior).upper():\n if idx % 2 == 0:\n o[subject][behaviorOut_json].append([row[0], rows[idx + 1][0]])\n\n if not plot_events.plot_time_ranges(self.pj,\n self.timeFormat,\n self.plot_colors,\n o,\n selectedObservations[0],\n plot_parameters[\"start time\"],\n plot_parameters[\"end time\"],\n plot_parameters[\"exclude behaviors\"],\n line_width=10):\n QMessageBox.warning(self, programName, \"Check events\")", "def main():\n # Pull variables from pf\n profileref = pfget('google_mapfeed.pf', profile)\n dbname = profileref['dbname']\n path = profileref['webbase']\n finalfile = '%s/%s' % (path, profileref['file'])\n bufferfile = '%s+' % finalfile\n max_nquakes = 600\n element_fields = ['lat', 'lon', 'depth', 'time', 'local_timestring', 'utc_timestring', 'magnitude', 'auth']\n\n if verbose:\n print \"Start: Creating main JSON file '%s' for all stations at %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n now = time.time()\n # Set time zone\n os.putenv('TZ','US/Pacific')\n time.tzset()\n if verbose:\n print \"The time zone is: %s\" % (time.tzname)[0]\n print \"The current time is: %s\" % now\n\n # Override defaults\n if override_number:\n if verbose:\n print \"Overriding default number of events (%d) with %d\" % (max_nquakes, override_number)\n nquakes = override_number\n else:\n nquakes = max_nquakes\n if override_timerange:\n if verbose:\n print \"Overiding default number of events (%d) with time range %d seconds\" % (max_nquakes, override_timerange)\n nquakes = False\n\n # Database processing\n if verbose:\n print \"Opening database\";\n print \"Number of events requested: %s\" % nquakes\n db = dbopen(dbname, 'r')\n\n '''\n Occasionally there is more than one magnitude for a single orid\n (such as provided by QED). We need the most recent magnitude for\n a given orid, so sort on orid and lddate, then group on orid,\n then get the most recent record number (greatest lddate) for each\n group. 
Add that to a dictionary we will use later.\n '''\n netmag_dict = {}\n db_netmag = dblookup(db, table='netmag')\n db_netmag.sort(['orid', 'lddate'])\n db_netmag_grp = dbgroup(db_netmag, 'orid')\n if verbose:\n print \"There are %s records\" % db_netmag_grp.query('dbRECORD_COUNT')\n for i in range(db_netmag_grp.query('dbRECORD_COUNT')):\n db_netmag_grp[3] = i\n orid, [dbptr, view, end_record, start_record] = db_netmag_grp.getv('orid', 'bundle')\n if verbose:\n print \"\\t- Iteration: %s: Orid: %s, Start record: %s, End record: %s\"% (i, orid, start_record, end_record)\n db_netmag[3] = end_record - 1\n if verbose:\n print \"\\t\\t- Magnitude: %s, Magtype: %s\" % (db_netmag.getv('magnitude')[0], db_netmag.getv('magtype')[0] )\n magnitude, magtype = db_netmag.getv('magnitude', 'magtype')\n netmag_dict[orid] = { 'rec':end_record, 'magnitude':magnitude, 'magtype':magtype }\n\n '''\n if verbose:\n for key in sorted(netmag_dict.iterkeys()):\n print \"%s: %s\" % (key, netmag_dict[key])\n '''\n\n '''\n Now get the event information\n '''\n db.lookup(table='origin')\n db.join('event')\n if verbose:\n print \"Number of joined records of event and origin tables: %s\" % db.query('dbRECORD_COUNT')\n if override_timerange:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - 'time >= %s'\" % (override_timerange, override_oldest)\n db.subset('time >= %d' % override_oldest)\n if verbose:\n print \"Subset on time. Number of records: %s\" % db.query('dbRECORD_COUNT')\n # Join views\n # db_joined = dbjoin(db, db_netmag)\n\n if verbose:\n print \"Subset orid == prefor\"\n db.subset('orid == prefor')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n print \"Subset for time != NULL\"\n db.subset('time != NULL')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n # We want the most recent first for the comparison with nquakes\n db.sort(['time'], reverse=True)\n if verbose:\n print \"Number of sorted records: %s\" % db.query('dbRECORD_COUNT')\n if nquakes:\n if db.query('dbRECORD_COUNT') > nquakes:\n db[3] = nquakes - 1\n min_time = db.getv('time')[0]\n db.subset(\"time >= %s\" % min_time)\n else:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - time > %s\" % (override_timerange, override_oldest)\n db.subset(\"time >= %s\" % override_oldest)\n # Sort in normal time - we want the most recent events plotted on top\n db.sort(('time'))\n if verbose:\n print \"Number of records without subset on time: %s\" % db.query('dbRECORD_COUNT')\n '''\n Build event dictionary\n '''\n event_dict = {'metadata':{},'events':{}}\n\n '''\n Build metadata dictionary\n '''\n if nquakes:\n event_dict['metadata']['max_nquakes'] = nquakes\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(min_time), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(min_time)\n event_dict['metadata']['type'] = 'event_limited'\n elif override_oldest:\n event_dict['metadata']['time_range'] = int(override_timerange)\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(override_oldest), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(override_oldest)\n event_dict['metadata']['type'] = 'time_limited'\n event_dict['metadata']['modification_time'] = int(time.time())\n event_dict['metadata']['modification_time_readable'] = epoch2str( int(time.time()), \"%H:%M 
UTC %A %B %o, %Y\" )\n\n '''\n Build event dictionary\n '''\n events = {}\n for i in range(db.query('dbRECORD_COUNT')):\n db[3] = i\n if verbose:\n epoch_time, orid = db.getv('time', 'orid')\n print \"\\tRecord number is: %s Orid is: %d Time is: %s\" % (db[3], orid, epoch2str(epoch_time, '%Y-%m-%d %H:%M:%S'))\n\n orid = db.getv('orid')[0]\n\n if orid in netmag_dict:\n events[i] = {}\n for ef in element_fields:\n # Parse values\n if ef is 'local_timestring' or ef is 'utc_timestring' or ef is 'time':\n value = dbgetv(db, 'time')[0]\n difference = float(now) - float(value)\n if difference < 6 * 3600:\n color = 'red'\n elif difference < 12 * 3600:\n color = 'orange'\n elif difference < 24 * 3600:\n color = 'yellow'\n elif difference < 72 * 3600:\n color = 'chartreuse'\n elif difference < 168 * 3600:\n color = 'blue'\n else:\n color = 'grey'\n events[i]['color'] = color\n elif ef is 'depth':\n value = dbgetv(db, 'depth')[0]\n elif ef is 'auth':\n value = dbgetv(db, 'auth')[0]\n elif ef is 'magnitude':\n # Magnitude\n # mlval, mbval, msval, magnitudeval, magtypeval = db.getv('ml', 'mb', 'ms', 'magnitude', 'magtype')\n # Null magnitude is -999.00\n magnitudeval = netmag_dict[orid]['magnitude']\n magtypeval = netmag_dict[orid]['magtype']\n if int(magnitudeval) > 0:\n scale = magtypeval\n value = '%.1f' % magnitudeval\n else:\n scale = ''\n value = 'N/A'\n events[i]['scale'] = scale\n else:\n value = dbgetv(db, ef)\n\n # Override formatting for specific fields\n if ef is 'lat' or ef is 'lon':\n value = '%.4f' % value\n elif ef is 'local_timestring':\n value = epoch2str( value, \"%H:%M:%S %Z %A %B %o, %Y\", \"US/Pacific\" )\n elif ef is 'utc_timestring':\n value = epoch2str( value, \"%H:%M:%S UTC %A %B %o, %Y\" )\n events[i][ef] = value\n\n full_lat, full_lon = db.getv('lat', 'lon')\n events[i]['grname'] = (grname(full_lat,full_lon)).title()\n events[i]['srname'] = (srname(full_lat,full_lon)).title()\n\n event_dict['events'] = events\n\n # Dump JSON file\n f = open(bufferfile, 'w') \n json.dump(event_dict, f, sort_keys=True, indent=2)\n f.flush()\n\n # Move the file to replace the older one\n try:\n os.rename(bufferfile, finalfile)\n except OSError:\n print \"Cannot rename JSON file from %s to %s\" % (bufferfile,finalfile)\n\n if verbose:\n print \"End: Creating main JSON file '%s' for all stations %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n db.close()\n return 0", "def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n \n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n #data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n #data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. 
/ 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data", "def generate_differ_times_two_files():\n fname = {'stress': 'resources/simple_stress_differ_times.json',\n 'strain': 'resources/simple_strain_differ_times.json'}\n expected = [ # makes an array of two pif systems\n pif.System(\n properties=[\n pif.Property(name='stress',\n scalars=list(np.linspace(0, 100)),\n conditions=pif.Value(\n name='time',\n scalars=list(np.linspace(0, rnd.randint(1, 100)))))]),\n\n pif.System(\n properties=[\n pif.Property(name='strain',\n scalars=list(np.linspace(0, 1)),\n conditions=pif.Value(\n name='time',\n scalars=list(np.linspace(0, rnd.randint(1, 100)))))])\n ]\n # dump the pifs into two seperate files\n with open(fname['stress'], 'w') as stress_file:\n pif.dump(expected[0], stress_file)\n with open(fname['strain'], 'w') as strain_file:\n pif.dump(expected[1], strain_file)\n\n return fname", "def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")", "def export_trajectory_as_json(trial_results: typing.Mapping[str, bson.ObjectId], filename: str,\n db_client: arvet.database.client.DatabaseClient) -> None:\n if len(trial_results) >= 1:\n json_data = {}\n added_ground_truth = False\n\n # For each trial result\n for label, trial_result_id in trial_results.items():\n trial_result = dh.load_object(db_client, db_client.trials_collection, trial_result_id)\n if trial_result is not None:\n if trial_result.success:\n if not added_ground_truth:\n added_ground_truth = True\n trajectory = trial_result.get_ground_truth_camera_poses()\n if len(trajectory) > 0:\n first_pose = trajectory[min(trajectory.keys())]\n json_data['ground_truth'] = [\n [time] + location_to_json(first_pose.find_relative(pose))\n for time, pose in trajectory.items()\n ]\n trajectory = trial_result.get_computed_camera_poses()\n if len(trajectory) > 0:\n first_pose = trajectory[min(trajectory.keys())]\n json_data[label] = [[time] + location_to_json(first_pose.find_relative(pose))\n for time, pose in trajectory.items()]\n\n with open('{0}.json'.format(filename), 'w') as json_file:\n json.dump(json_data, json_file)", "def anaylze_notifications():\n\n files = os.listdir(f\"{cur_wd}/bigbeta/stocks/outrageous_stocks/{cur_dt}/\")\n with 
open(f\"{cur_wd}/bigbeta/stocks/outrageous_stocks/{cur_dt}/{file}\", \"r\") as f:\n stocks = json.load(f)\n analyzed_notifications = []\n for stock in stocks:\n eod_data = wb.get_quote(stock=stock)\n analyzed_notifications.append({stock['ticker']: float(eod_data['high'])})\n\n with open(f\"{cur_wd}/bigbeta/stocks/outrageous_stocks/analyses/{cur_dt}.json\", \"w\") as f:\n json.dump(analyzed_notifications, f)\n\n return None", "def export_json(self, verbosejson=False):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".json\",\n filetypes=((\"javascript object notation\", \"*.json\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n joutdict = {}\n joutdict['NMEA Stats'] = self.tabs.window.nmeatracker.nmea_stats()\n joutdict['AIS Stats'] = self.tabs.window.aistracker.tracker_stats()\n joutdict['AIS Stations'] = self.tabs.window.aistracker. \\\n all_station_info(verbose=verbosejson)\n export.write_json_file(joutdict, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')", "def file_output(patient):\n import json\n outfile = open(\"{}-{}.json\".format(patient[\"First\"], patient[\"Last\"]), \"w\")\n patient_dictionary = {}\n patient_dictionary[\"First Name\"] = patient[\"First\"]\n patient_dictionary[\"Last Name\"] = patient[\"Last\"]\n patient_dictionary[\"Age\"] = patient[\"Age\"]\n patient_dictionary[\"Gender\"] = patient[\"Gender\"]\n patient_dictionary[\"Diagnosis\"] = patient[\"TSH Result\"]\n patient_dictionary[\"TSH\"] = patient[\"TSH Data\"]\n json.dump(patient_dictionary, outfile)\n outfile.close()\n return", "def create_newfile():\n date = datetime.today().strftime('%d_%m_%Y').replace(\" \", \"_\")\n file_name = screen_name + '_' + date + \".json\"\n with io.FileIO(file_name, \"w\") as file:\n file.write(\"Json\")\n file.close()\n return file_name", "def writeFile(fileName, profile, singleScores, bestMotifs, dnaScores, bestMotif):\n with open(fileName, 'w+') as f:\n f.write(strftime(\"Created on: %Y-%m-%d %H:%M:%S\\n\", localtime()))\n f.write('Best Motifs: ')\n f.write('\\n')\n json.dump(bestMotif, f)\n f.write('\\n')\n f.write('Motifs Profile: ')\n f.write('\\n')\n json.dump(profile, f)\n f.write('\\n')\n f.write('Single Scores: ')\n f.write('\\n')\n for i in range(0, len(singleScores)):\n json.dump(bestMotifs[i], f)\n f.write(': ')\n json.dump(singleScores[i], f)\n f.write('\\n')\n f.write('Motifs that have a better score than the worst scoring one: ')\n f.write('\\n')\n for scores in dnaScores:\n json.dump(scores, f)\n f.write('\\n')", "def total_fire_power_time_series(files, bounding_box):\n \n assert isinstance(bounding_box, BoundingBox)\n bb = bounding_box\n \n results = {}\n \n vals = map(_process_single_fire_power_time_series, zip(files, itertools.repeat(bb)))\n vals = (val for val in vals if val is not None)\n \n for time, val, fname in vals:\n results[time] = (val, fname)\n \n return results", "def filterHeterozygousMarkers(markerDict, newVCFdf, filters, log_file): \n #logic check\n print(\"Pre-filter: {}\".format(newVCFdf.shape))\n\n fail_counter=0\n #iterates through rows of frequencyDict and markerDict and filters out markers based on filters\n for i, frequencyDict in markerDict.items():\n #heterozygous marker data allowance\n HetFreq=frequencyDict.get('1')\n if type(HetFreq)==float and HetFreq > filters:\n newVCFdf.drop([i],axis=0, inplace=True)\n fail_counter+=1\n individualDict, markerDict= processDataFrame(newVCFdf, FilterStep=1)\n log_file.write(\"\\nFailed Heterozygous Markers Percent: {:.2f} of {} 
markers\\n\".format(fail_counter/len(markerDict)*100,len(markerDict))) \n print(\"\\nFailed Heterzygous Markers Percent: {:.2f} of {} markers\\n\".format(fail_counter/len(markerDict)*100, len(markerDict)))\n \n #logic check\n print(\"Post-filter: {}\".format(newVCFdf.shape))\n\n return individualDict, markerDict", "def GFFthreshold(infn,outbed):\n converterd = {'probe':nodate,'a':nodate,'b':nodate}\n logging.debug('reading GFF into record array')\n a = csv2rec(infn, \n delimiter='\\t', \n names=('chr','prog','id','start','stop','ratio','a','b','probe'),\n converterd=converterd)\n logging.debug('sorting record array')\n a.sort(order=('chr','start'))\n fout = open(outbed,'w')\n m = a.ratio.mean()\n std = a.ratio.std()\n thresh = m + 2.5 * std\n allregions = []\n region = []\n lastchr = a.chr[0]\n lastpos = None\n count = 0\n\n for data in a:\n if data.ratio < thresh:\n continue\n\n if lastpos is None:\n dist = 0\n else:\n dist = data.start - lastpos\n \n logging.debug('region is currently')\n for i in region:\n logging.debug('\\t%s' % i)\n logging.debug('this data: %s' % data)\n logging.debug('dist from last: %s' % dist)\n \n if dist > 500 or data.chr != lastchr:\n \n logging.debug('\\ndist > 500; checking region len')\n logging.debug('regionlen: %s' % len(region))\n for i in region:\n logging.debug('\\t%s' % i )\n if len(region) < 4:\n logging.debug('region not long enough, erasing')\n else:\n logging.debug('region is long enough!!!!')\n logging.debug('region to be exported is')\n for i in region:\n logging.debug('\\t%s' % i)\n chr = region[0].chr\n start = region[0].start\n stop = region[-1].stop\n fout.write('%s\\t%s\\t%s\\n' % (chr,start,stop))\n count += 1\n region = []\n\n lastpos = data.stop\n lastchr = data.chr\n logging.debug('adding %s to region' % data)\n region.append(data)\n\n if len(region) >= 4:\n logging.debug('last region will be exported')\n logging.debug('region to be exported is')\n for i in region:\n logging.debug('\\t%s' % i)\n \n chr = region[0].chr\n start = region[0].start\n stop = region[-1].stop\n fout.write('%s\\t%s\\t%s\\n' % (chr,start,stop))\n count += 1\n\n else:\n logging.debug('last region not long enough')\n\n fout.close()\n logging.debug('Number of enriched regions: %s' % count)\n logging.debug('using threshold: %s' % thresh)", "def write_pressure_log(ad2):\n pressure_settings = get_pressure_options()\n logline = (\n '{\"Time\":\"'\n + time.strftime('%H:%M:%S\",\"Date\":\"%d-%m-%Y\"', time.gmtime(gv.now))\n + ',\"Pressure\":\"'\n + str(ad2)\n + '\"}\\n'\n )\n print(logline)\n log = read_pressure_log()\n log.insert(0, logline)\n with open(\"./data/pressurelog.json\", \"w\") as f:\n if int(pressure_settings[\"records\"]) != 0:\n f.writelines(log[: int(pressure_settings[\"records\"])])\n else:\n f.writelines(log)\n return", "def create_output_filenames(input_file, output_folder, method):\n\n # Create output folder\n if os.path.exists(output_folder):\n os.system('rm -rf %s' % output_folder)\n os.makedirs(output_folder, exist_ok=True)\n os.chdir(output_folder)\n\n # Create output filenames\n screen_name = '%s_%s' % (input_file.split('/')[-1][:-4], method)\n output = {'ODresults': '%s_OD_results.csv' % screen_name,\n 'ScoreCells': '%s_OD_results_score.csv' % screen_name,\n 'Outliers': '%s_outlier_cells.csv' % screen_name,\n 'PeneAgreement': '%s_penetrance_agreement.png' % screen_name,\n 'Penetrance': '%s_penetrance.png' % screen_name,\n 'PenetranceControls': '%s_penetrance_controls.png' % screen_name,\n 'KS_Correlation': '%s_KS_correlation.png' % screen_name,\n 
'WT_Percentile': '%s_WT_percentile.png' % screen_name,\n 'PCA': '%s_PCA.png' % screen_name,\n 'UMAP': '%s_UMAP.png' % screen_name,\n 'log': '%s_log.txt' % screen_name\n }\n\n return output", "def Thresholds(self) :\n \n # keep pass through thresholds\n d = { }\n\n from Hlt2Lines.Commissioning.Lines import CommissioningLines \n d.update({CommissioningLines :\n {'Prescale' : {'Hlt2PassThrough' : 0.0001,\n 'Hlt2Forward' : 0.00001,\n 'Hlt2DebugEvent' : 0.000001},\n 'Postscale' : {'Hlt2ErrorEvent' : 'RATE(0.01)'},\n # do not want debug events on lumi-exclusive Hlt1 events...\n 'DebugEvent' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\"},\n 'ErrorEvent' : {'Priority' : 254,\n 'VoidFilter' : '',\n 'HLT2' : \"HLT_COUNT_ERRORBITS_RE('^Hlt2.*',0xffff) > 0\"},\n 'PassThrough' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'NoBiasPassThrough' : {'HLT1' : \"HLT_PASS('Hlt1NoBiasPrescaledDecision')\",\n 'VoidFilter' : ''},\n 'Transparent' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(ODIN.*|L0.*|MB.*|BeamGas.*|Velo.*|NZS.*|Incident|Tell1Error|ErrorEvent)Decision$')\",\n 'VoidFilter' : ''},\n 'Lumi' : {'HLT1' : \"HLT_PASS_SUBSTR('Hlt1Lumi')\",\n 'VoidFilter' : ''},\n 'KS0_DD' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'KS0_LL' : {'HLT1' : \"HLT_PASS_RE('^Hlt1(?!Lumi).*Decision$')\",\n 'VoidFilter' : ''},\n 'Turbo' : ['KS0_DD', 'KS0_LL']\n }}\n )\n return d", "def testD():\n results = {}\n for threshold in ['volume', 25, 35,40, 20, ]: #adjust here\n print threshold\n results[threshold] = testC(threshold=threshold)\n return results", "def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n\n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n # data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n # data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n #data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ZEN_PNT'] = np.float32(90. - table['ALT'].mean())\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. 
/ 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data", "def write_to_json(dictData, fileout):\n\t# Prepare the output file\n\tfout = codecs.open(fileout, 'w', 'utf-8')\n\thwDict = prepare_hw_dict(dictData)\n\tjson.dump(hwDict, fout)\n\t# Give some summary to the user\n\tprint('JSON generated. Success!')\n\tprint('{} headwords written to JSON file.'.format(len(hwDict)))", "def read_and_save_timestamps(path, filename=\"saved\", divisor=4):\n osu_dict, wav_file = read_osu_file(path, convert=True)\n data, flow_data = get_map_notes(osu_dict, divisor=divisor)\n timestamps = [c[1] for c in data]\n with open(filename + \"_ts.json\", \"w\") as json_file:\n json.dump(np.array(timestamps).tolist(), json_file)" ]
[ "0.67087024", "0.5573654", "0.5543615", "0.5325832", "0.5257581", "0.523323", "0.5181749", "0.5180666", "0.51276666", "0.5114678", "0.51098496", "0.5098101", "0.5082852", "0.5076031", "0.50719666", "0.5061145", "0.50575876", "0.5052095", "0.5037945", "0.5035007", "0.5021967", "0.5007761", "0.49984068", "0.49963072", "0.49908468", "0.49901322", "0.49865457", "0.49742347", "0.49556065", "0.49537665", "0.49475107", "0.4939692", "0.49386594", "0.49307808", "0.49261376", "0.49244344", "0.49086052", "0.49019656", "0.4884872", "0.48755622", "0.48589453", "0.48578167", "0.48568556", "0.48522836", "0.485148", "0.4851306", "0.48480624", "0.484078", "0.48376015", "0.48298416", "0.4828992", "0.48274538", "0.48232305", "0.48130935", "0.47915977", "0.4790682", "0.4788745", "0.4783251", "0.47761112", "0.47724986", "0.477079", "0.47691315", "0.47674274", "0.47660276", "0.4758763", "0.47454172", "0.4743843", "0.474001", "0.47385743", "0.47382677", "0.47322142", "0.47319844", "0.47303033", "0.47267306", "0.47172922", "0.4712559", "0.4712554", "0.47085443", "0.470194", "0.46977556", "0.46924928", "0.46896926", "0.46819842", "0.4674959", "0.4672915", "0.46636283", "0.46615103", "0.46542647", "0.46488756", "0.4648604", "0.46391577", "0.4636943", "0.46346676", "0.46254462", "0.46141058", "0.46131775", "0.46079853", "0.46071845", "0.46042702", "0.46022823" ]
0.62420124
1
All events in one time window (not just two). Used if there is more than one event occurring within a short time period. Will generate an output file for every event that occurs within a given time window. Not to be confused with many_events, which generates output given multiple time windows. Can create files for up to 3 events within the specified time window.
def multi_event(st,et,instrument_chosen,subevent):
    print('checking for multiple events within given time window')

    #creating file for time window with first events for all thresholds
    out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)

    #creating files for all second events for all thresholds
    new_files = two_in_one(out_name,et,subevent)

    #creating files for any third events for all thresholds that had a second event
    for file in new_files:
        two_in_one(file,et,subevent)

    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def INPUT_Periods_file(input):\n \n global events\n \n tb = 3600\n ta = 3600\n \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n \n len_events = len(events)\n \n input_period = open(os.path.join(os.getcwd(), 'INPUT-Periods'), 'a+')\n\n for i in range(0, len_events):\n \n str_event = str(events[i]['datetime']-tb) + '_' + \\\n str(events[i]['datetime']+ta) + '_' + \\\n str(events[i]['magnitude'] - 0.01) + '_' + \\\n str(events[i]['magnitude'] + 0.01) + '\\n'\n input_period.writelines(str_event)\n \n input_period.close()\n \n print '************************************************************' \n print 'New INPUT-Periods file is generated in your folder.'\n print 'Now, you could run the program again based on your desired event :)' \n print '************************************************************'\n \n sys.exit()", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def main():\r\n # handle arguments\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('-t', '--time', help = 'start time', default = \"2018-12-26 18:11:08.509654\")\r\n parser.add_argument('-bd', '--min_duration', type = int, help = 'minimum duration', default = 25)\r\n parser.add_argument('-td', '--max_duration', type = int, help = 'maximum duration', default = 70)\r\n parser.add_argument('-e', '--events', type = int, help = 'how many events to generate', default = 
1000)\r\n\r\n args = parser.parse_args()\r\n\r\n f = open(f\"tests/test_1.json\", \"a\")\r\n\r\n string_time = \"2019-07-08 10:40:00.423123\"\r\n\r\n current_time = datetime.datetime.strptime(string_time, '%Y-%m-%d %H:%M:%S.%f')\r\n\r\n for i in range(0, args.events):\r\n\r\n duration = random.randint(args.min_duration, args.max_duration)\r\n\r\n json = \"{\\\"timestamp\\\": \\\"\" \\\r\n + str(current_time) \\\r\n + \"\\\", \\\"translation_id\\\": \\\"5aa5b2f39f7254a75aa5\\\", \" \\\r\n \"\\\"source_language\\\": \\\"en\\\",\\\"target_language\\\":\" \\\r\n \" \\\"fr\\\",\\\"client_name\\\": \\\"easyjet\\\",\\\"event_name\\\":\" \\\r\n \"\\\"translation_delivered\\\",\\\"nr_words\\\": 30, \\\"duration\\\": \"\\\r\n + str(duration) + \"}\\n\"\r\n\r\n f.write(json)\r\n\r\n minutes = random.randint(0, 59)\r\n seconds = random.randint(0, 59)\r\n\r\n current_time += datetime.timedelta(minutes=minutes, seconds=seconds)\r\n\r\n print(f\"New file is located at inputs/{args.events}.json\")", "def export_aggregated_events(self):\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out, not_paired_obs_list = \"\", []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n return\n\n parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=False,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not parameters[\"selected subjects\"] or not parameters[\"selected behaviors\"]:\n return\n\n # check for grouping results\n flag_group = True\n if len(selectedObservations) > 1:\n flag_group = dialog.MessageDialog(programName, \"Group events from selected observations in one file?\",\n [YES, NO]) == YES\n\n extended_file_formats = [\"Tab Separated Values (*.tsv)\",\n \"Comma Separated Values (*.csv)\",\n \"Open Document Spreadsheet ODS (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\",\n \"SDIS (*.sds)\",\n \"SQL dump file (*.sql)\"]\n\n if flag_group:\n file_formats = [\"tsv\", \"csv\", \"ods\", \"xlsx\", \"xls\", \"html\", \"sds\",\n \"sql\"] # must be in same order than extended_file_formats\n\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self,\n \"Export aggregated events\",\n \"\", \";;\".join(extended_file_formats))\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Export aggregated events\", \"\",\n \";;\".join(extended_file_formats))\n\n if not fileName:\n return\n\n outputFormat = file_formats[extended_file_formats.index(filter_)]\n if pathlib.Path(fileName).suffix != \".\" + outputFormat:\n fileName = str(pathlib.Path(fileName)) + \".\" + outputFormat\n\n else: # not grouping\n\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma Separated values (*.csv)\",\n \"Open Document Spreadsheet (*.ods)\",\n \"Microsoft Excel Spreadsheet XLSX (*.xlsx)\",\n \"Legacy Microsoft Excel Spreadsheet XLS (*.xls)\",\n \"HTML (*.html)\")\n 
item, ok = QInputDialog.getItem(self, \"Export events format\", \"Available formats\", items, 0, False)\n if not ok:\n return\n outputFormat = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n exportDir = QFileDialog(self).getExistingDirectory(self, \"Choose a directory to export events\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if outputFormat == \"sql\":\n _, _, conn = db_functions.load_aggregated_events_in_db(self.pj,\n parameters[\"selected subjects\"],\n selectedObservations,\n parameters[\"selected behaviors\"])\n try:\n with open(fileName, \"w\") as f:\n for line in conn.iterdump():\n f.write(\"{}\\n\".format(line))\n except:\n errorMsg = sys.exc_info()[1]\n logging.critical(errorMsg)\n QMessageBox.critical(None, programName, str(errorMsg), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n return\n\n data_header = tablib.Dataset()\n data_header.title = \"Aggregated events\"\n header = [\"Observation id\", \"Observation date\", \"Media file\", \"Total length\", \"FPS\"]\n if INDEPENDENT_VARIABLES in self.pj:\n for idx in sorted_keys(self.pj[INDEPENDENT_VARIABLES]):\n header.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n header.extend([\"Subject\", \"Behavior\"])\n header.extend([\"Modifiers\"])\n header.extend([\"Behavior type\", \"Start (s)\", \"Stop (s)\", \"Duration (s)\", \"Comment start\", \"Comment stop\"])\n data_header.append(header)\n\n data = copy.deepcopy(data_header)\n for obsId in selectedObservations:\n d = export_observation.export_aggregated_events(self.pj, parameters, obsId)\n data.extend(d)\n\n if not flag_group:\n fileName = str(\n pathlib.Path(pathlib.Path(exportDir) / safeFileName(obsId)).with_suffix(\".\" + outputFormat))\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)\n data = copy.deepcopy(data_header)\n\n if outputFormat == \"sds\": # SDIS format\n out = \"% SDIS file created by eMOC (www.eMOC.unito.it) at {}\\nTimed <seconds>;\\n\".format(\n datetime_iso8601())\n for obsId in selectedObservations:\n # observation id\n out += \"\\n<{}>\\n\".format(obsId)\n dataList = list(data[1:])\n for event in sorted(dataList, key=lambda x: x[-4]): # sort events by start time\n if event[0] == obsId:\n behavior = event[-7]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n behavior = behavior.replace(char, \"_\")\n subject = event[-8]\n # replace various char by _\n for char in [\" \", \"-\", \"/\"]:\n subject = subject.replace(char, \"_\")\n event_start = \"{0:.3f}\".format(\n round(event[-4], 3)) # start event (from end for independent variables)\n if not event[-3]: # stop event (from end)\n event_stop = \"{0:.3f}\".format(round(event[-4] + 0.001, 3))\n else:\n event_stop = \"{0:.3f}\".format(round(event[-3], 3))\n out += \"{subject}_{behavior},{start}-{stop} \".format(subject=subject, behavior=behavior,\n start=event_start, stop=event_stop)\n out += \"/\\n\\n\"\n with open(fileName, \"wb\") as f:\n f.write(str.encode(out))\n return\n\n if flag_group:\n r, msg = export_observation.dataset_write(data, fileName, outputFormat)\n if not r:\n QMessageBox.warning(None, programName, msg, QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the 
file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def runEventCreation():\r\n config = CONFIG['steps']['EventCreation']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n\r\n min_window_size = ci['min_window_size']\r\n change_speed_by = ci['change_speed_by']\r\n speed_ratio = ci['train_zero_speed_ratio']\r\n datetime_limit = ci['datetime_limit']\r\n csv_name_prefix = ci['csv_name_prefix']\r\n input_bucket = ci['bucket']\r\n window_event_bucket = ci['window_event_bucket']\r\n window_events_file = ci['window_events_file']\r\n\r\n output_bucket = co['bucket']\r\n event_dir = co['event_dir']\r\n filename_include = co['filename_include']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n csv_files = get_files(input_bucket, boto_client,\r\n file_type='csv', prefix='filtered')\r\n csv_files = ['filtered/7016_2020-09-09.csv']\r\n 
create_window_event(files=csv_files,\r\n input_bucket=input_bucket,\r\n output_bucket=output_bucket,\r\n minio_client=minioClient,\r\n min_window_size=min_window_size,\r\n ouput_dir=event_dir,\r\n window_event_bucket=window_event_bucket,\r\n window_events_file=window_events_file,\r\n csv_name_prefix=csv_name_prefix,\r\n change_speed_by=change_speed_by,\r\n train_zero_speed_ratio=speed_ratio,\r\n datetime_limit=datetime_limit,\r\n filename_include=filename_include)", "def export_events(self, output_file, params, format='json', timezone_offset=None, add_gzip_header=False,\n compress=False, request_per_day=False, raw_stream=False, buffer_size=1024):\n # Increase timeout to 20 minutes if it's still set to default, /export requests can take a long time\n timeout_backup = self.timeout\n if self.timeout == 120:\n self.timeout = 1200\n\n request_count = 0\n if request_per_day:\n date_format = '%Y-%m-%d'\n f = datetime.datetime.strptime(params['from_date'], date_format)\n t = datetime.datetime.strptime(params['to_date'], date_format)\n delta = t - f\n request_count = delta.days\n\n for x in range(request_count + 1):\n params_copy = deepcopy(params)\n current_file = output_file\n\n if request_per_day:\n d = time.strptime(params['from_date'], date_format)\n current_day = (datetime.date(d.tm_year, d.tm_mon, d.tm_mday) + datetime.timedelta(x)).strftime(\n date_format)\n file_components = output_file.split('.')\n current_file = file_components[0] + \"_\" + current_day\n if len(file_components) > 1:\n current_file = current_file + '.' + file_components[1]\n params_copy['from_date'] = current_day\n params_copy['to_date'] = current_day\n\n events = self.query_export(params_copy, add_gzip_header=add_gzip_header, raw_stream=raw_stream)\n\n if raw_stream:\n if add_gzip_header and current_file[-3:] != '.gz':\n current_file = current_file + '.gz'\n with open(current_file, 'wb') as fp:\n shutil.copyfileobj(events, fp, buffer_size)\n else:\n if timezone_offset is not None:\n # Convert timezone_offset from hours to seconds\n timezone_offset = timezone_offset * 3600\n for event in events:\n event['properties']['time'] = int(event['properties']['time'] - timezone_offset)\n\n Mixpanel.export_data(events, current_file, format=format, compress=compress)\n\n # If we modified the default timeout above, restore default setting\n if timeout_backup == 120:\n self.timeout = timeout_backup", "def write_log_events(self, log_events):\n # Create log file name.\n # Replace / with - so LogGroup names can be written to current directory.\n file_name = self.log_group.name.replace('/', '-') + \"-\" + self.name + '-0.log'\n\n # Append LogEvents to log file.\n with open(file_name, 'a') as log_file:\n for event in log_events:\n log_file.write(event.message + '\\n')\n print('Wrote ' + str(len(log_events)) + ' LogEvents to ' + file_name)\n\n # Rotate log file if it's bigger than limit\n log_file_size = os.path.getsize(file_name)\n\n if log_file_size > self.log_file_limit:\n rotated_file_name = file_name.split('.')[0] + '-' + str(int(time.time())) + \".log\"\n print('Rotating ' + file_name + ' to ' + rotated_file_name)\n os.rename(file_name, rotated_file_name)", "def many_events(start_time,end_time,subevent_bools):\r\n \r\n #running through for each event\r\n for j in range(len(start_time)):\r\n \r\n #start, end, and subevent bool for this event\r\n st = start_time[j]\r\n et = end_time[j]\r\n subevent = bool(subevent_bools[j])\r\n \r\n #checking if start time is actually available\r\n if str(st) != 'nan':\r\n try:\r\n st = parse(st)\r\n 
yes_st = True\r\n except ValueError:\r\n yes_st = False\r\n else:\r\n yes_st = False\r\n \r\n #checking if end time is actually available\r\n if str(et) != 'nan':\r\n try:\r\n et = parse(et)\r\n yes_et = True\r\n except ValueError:\r\n yes_et = False\r\n else:\r\n yes_et = False\r\n \r\n #if both start and end times are available, running the code\r\n if yes_st and yes_et:\r\n #event must be after Nov. 2010 because currently no capability for\r\n #instruments in use before then - change this if you have that\r\n #capability\r\n if st > datetime(2010,9,1):\r\n try:\r\n print('got start and end times! running database extraction') \r\n database_extraction(st,et,instrument_chosen,subevent)\r\n except:\r\n continue\r\n else:\r\n print('cannot run for events before November 2010 because do not have '\r\n 'access to instruments before then')", "def export_state_events_as_textgrid(self):\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n\n if not selectedObservations:\n return\n\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=False,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n exportDir = QFileDialog(self).getExistingDirectory(self, \"Export events as TextGrid\", os.path.expanduser('~'),\n options=QFileDialog(self).ShowDirsOnly)\n if not exportDir:\n return\n\n for obsId in selectedObservations:\n\n out = \"\"\"File type = \"ooTextFile\"\nObject class = \"TextGrid\"\n\nxmin = 0\nxmax = 98.38814058956916\ntiers? <exists>\nsize = {subjectNum}\nitem []:\n\"\"\"\n subjectheader = \"\"\" item [{subjectIdx}]:\n class = \"IntervalTier\"\n name = \"{subject}\"\n xmin = {intervalsMin}\n xmax = {intervalsMax}\n intervals: size = {intervalsSize}\n\"\"\"\n\n template = \"\"\" intervals [{count}]:\n xmin = {xmin}\n xmax = {xmax}\n text = \"{name}\"\n\"\"\"\n\n flagUnpairedEventFound = False\n '''TO BE REMOVED totalMediaDuration = round(self.observationTotalMediaLength(obsId), 3)'''\n totalMediaDuration = round(project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId]), 3)\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n cursor.execute((\"SELECT count(distinct subject) FROM events \"\n \"WHERE observation = '{}' AND subject in ('{}') AND type = 'STATE' \").format(obsId,\n \"','\".join(\n plot_parameters[\n \"selected subjects\"])))\n subjectsNum = int(list(cursor.fetchall())[0][0])\n\n subjectsMin, subjectsMax = 0, totalMediaDuration\n\n out = \"\"\"File type = \"ooTextFile\"\nObject class = \"TextGrid\"\n\nxmin = {subjectsMin}\nxmax = {subjectsMax}\ntiers? <exists>\nsize = {subjectsNum}\nitem []:\n\"\"\".format(subjectsNum=subjectsNum, subjectsMin=subjectsMin, subjectsMax=subjectsMax)\n\n subjectIdx = 0\n for subject in plot_parameters[\"selected subjects\"]:\n\n subjectIdx += 1\n\n cursor.execute(\"SELECT count(*) FROM events WHERE observation = ? AND subject = ? AND type = 'STATE' \",\n (obsId, subject))\n intervalsSize = int(list(cursor.fetchall())[0][0] / 2)\n\n intervalsMin, intervalsMax = 0, totalMediaDuration\n\n out += subjectheader\n\n cursor.execute(\n \"SELECT occurence, code FROM events WHERE observation = ? AND subject = ? 
AND type = 'STATE' order by occurence\",\n (obsId, subject))\n\n rows = [{\"occurence\": float2decimal(r[\"occurence\"]), \"code\": r[\"code\"]} for r in cursor.fetchall()]\n if not rows:\n continue\n\n count = 0\n\n # check if 1st behavior starts at the beginning\n\n if rows[0][\"occurence\"] > 0:\n count += 1\n out += template.format(count=count, name=\"null\", xmin=0.0, xmax=rows[0][\"occurence\"])\n\n for idx, row in enumerate(rows):\n if idx % 2 == 0:\n\n # check if events not interlacced\n if row[\"key\"] != rows[idx + 1][\"key\"]:\n QMessageBox.critical(None, programName,\n \"The events are interlaced. It is not possible to produce the Praat TextGrid file\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n return\n\n count += 1\n out += template.format(count=count, name=row[\"key\"], xmin=row[\"occurence\"],\n xmax=rows[idx + 1][\"occurence\"])\n\n # check if difference is > 0.001\n if len(rows) > idx + 2:\n if rows[idx + 2][\"occurence\"] - rows[idx + 1][\"occurence\"] > 0.001:\n\n logging.debug(\"difference: {}-{}={}\".format(rows[idx + 2][\"occurence\"],\n rows[idx + 1][\"occurence\"],\n rows[idx + 2][\"occurence\"] - rows[idx + 1][\n \"occurence\"]))\n\n out += template.format(count=count + 1, name=\"null\", xmin=rows[idx + 1][\"occurence\"],\n xmax=rows[idx + 2][\"occurence\"])\n count += 1\n else:\n logging.debug(\"difference <=0.001: {} - {} = {}\".format(rows[idx + 2][\"occurence\"],\n rows[idx + 1][\"occurence\"],\n rows[idx + 2][\"occurence\"] -\n rows[idx + 1][\"occurence\"]))\n rows[idx + 2][\"occurence\"] = rows[idx + 1][\"occurence\"]\n logging.debug(\"difference after: {} - {} = {}\".format(rows[idx + 2][\"occurence\"],\n rows[idx + 1][\"occurence\"],\n rows[idx + 2][\"occurence\"] -\n rows[idx + 1][\"occurence\"]))\n\n # check if last event ends at the end of media file\n if rows[-1][\"occurence\"] < project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId]):\n count += 1\n out += template.format(count=count, name=\"null\", xmin=rows[-1][\"occurence\"],\n xmax=totalMediaDuration)\n\n # add info\n out = out.format(subjectIdx=subjectIdx, subject=subject, intervalsSize=count, intervalsMin=intervalsMin,\n intervalsMax=intervalsMax)\n\n try:\n with open(\"{exportDir}{sep}{obsId}.textGrid\".format(exportDir=exportDir, sep=os.sep, obsId=obsId),\n \"w\") as f:\n f.write(out)\n\n if flagUnpairedEventFound:\n QMessageBox.warning(self, programName,\n \"Some state events are not paired. 
They were excluded from export\", \\\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n self.statusbar.showMessage(\"Events exported successfully\", 10000)\n\n except:\n errorMsg = sys.exc_info()[1]\n logging.critical(errorMsg)\n QMessageBox.critical(None, programName, str(errorMsg), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)", "def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def get_events(self):\n\n print \"\\ngetting new Events\"\n path = os.path.join(self.path, 'no_consent')\n for d_cnt, date in sorted(enumerate(os.listdir(path))):\n\n if os.path.isdir(os.path.join(self.events_path, date)):\n print \"%s already processed\" % date\n continue\n\n directory = os.path.join(path, date)\n for recording in os.listdir(directory):\n if os.path.isdir(os.path.join(directory, recording)):\n\n # Can we reduce this list of objects using ROI information?\n try:\n use_objects = {}\n for region, objects in self.soma_objects.items():\n for ob, position in objects.items():\n use_objects[ob] = position\n\n ce.get_event(recording, directory, use_objects, self.config['events'])\n except:\n print \"recording: %s in: %s is broken.\" %(recording, directory)\n else:\n print \"already processed: %s\" % recording\n print \"done.\"", "def _events_tsv(events, durations, raw, fname, trial_type, overwrite=False):\n # Start by filling all data that we know into an ordered dictionary\n first_samp = raw.first_samp\n sfreq = raw.info[\"sfreq\"]\n events = events.copy()\n events[:, 0] -= first_samp\n\n # Onset column needs to be specified in seconds\n data = OrderedDict(\n [\n (\"onset\", events[:, 0] / sfreq),\n (\"duration\", durations),\n (\"trial_type\", None),\n (\"value\", events[:, 2]),\n (\"sample\", events[:, 0]),\n ]\n )\n\n # Now check if trial_type is specified or should be removed\n if trial_type:\n trial_type_map = {v: k for k, v in trial_type.items()}\n data[\"trial_type\"] = [trial_type_map.get(i, \"n/a\") for i in events[:, 2]]\n else:\n del data[\"trial_type\"]\n\n _write_tsv(fname, data, overwrite)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http)\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 20 events')\n try:\n eventsResult = service.events().list(\n calendarId='l3mvqk399k73ehoais4bu6lc74@group.calendar.google.com', timeMin=now, maxResults=20, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n if not events:\n print('No upcoming events found.')\n text_file = open(\"scheduledActions.txt\", \"wb\") #May want to use a check on the msg type to only overwrite calendar tasks\n # text_file.write(bytes('Updated '+now[:-8]+'\\n','UTF-8'))\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n start = start[:22] + start[-2:] #Trims the last colon\n start = datetime.datetime.strptime(start,'%Y-%m-%dT%H:%M:%S%z')\n start = 
int(time.mktime(start.timetuple()))\n end = event['end'].get('dateTime', event['end'].get('date'))\n end = end[:22] + end[-2:] #Trims the last colon\n end = datetime.datetime.strptime(end,'%Y-%m-%dT%H:%M:%S%z')\n end = int(time.mktime(end.timetuple()))\n description = event['description']\n if description.count(',')==5:\n desc1=description.split(\",\")[0] + \",\" + description.split(\",\")[1] + \",\" + description.split(\",\")[2]\n print(start,desc1)\n writeString=str(start)+','+desc1+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n desc2=description.split(\",\")[3] + \",\" + description.split(\",\")[4] + \",\" + description.split(\",\")[5]\n print(end,desc2)\n writeString=str(end)+','+desc2+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n else:\n print(start, description) #event['summary'] event['location']\n writeString=str(start)+','+description+\"\\n\"\n text_file.write(bytes(writeString,'UTF-8'))\n text_file.close()\n print('Calendar read complete.')\n except httplib2.ServerNotFoundError:\n print(\"!---- Looks like there's no internet connection just now. Wait till tomorrow.\")", "def create_foders_files(events, eventpath):\n \n len_events = len(events)\n \n for i in range(0, len_events):\n if os.path.exists(os.path.join(eventpath, events[i]['event_id'])) == True:\n \n if raw_input('Folder for -- the requested Period (min/max) ' + \\\n 'and Magnitude (min/max) -- exists in your directory.' + '\\n\\n' + \\\n 'You could either close the program and try updating your ' + \\\n 'folder OR remove the tree, continue the program and download again.' + \\\n '\\n' + 'Do you want to continue? (Y/N)' + '\\n') == 'Y':\n print '-------------------------------------------------------------'\n shutil.rmtree(os.path.join(eventpath, events[i]['event_id']))\n \n else:\n print '------------------------------------------------'\n print 'So...you decided to update your folder...Ciao'\n print '------------------------------------------------'\n sys.exit()\n\n for i in range(0, len_events):\n try:\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'BH_RAW'))\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'Resp'))\n os.makedirs(os.path.join(eventpath, events[i]['event_id'], 'info'))\n except Exception, e:\n pass\n \n for i in range(0, len_events):\n Report = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'report_st'), 'a+')\n Report.close()\n \n \n for i in range(0, len_events):\n Exception_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'exception'), 'a+')\n eventsID = events[i]['event_id']\n Exception_file.writelines('\\n' + eventsID + '\\n')\n \n Syn_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'station_event'), 'a+')\n Syn_file.close()\n \n if input['time_iris'] == 'Y':\n for i in range(0, len_events):\n time_file = open(os.path.join(eventpath, events[i]['event_id'], \\\n 'info', 'iris_time'), 'a+')\n time_file.close()\n \n \n for i in range(0, len_events):\n quake_file = open(os.path.join(eventpath, events[i]['event_id'],\\\n 'info', 'quake'), 'a+')\n \n quake_file.writelines(repr(events[i]['datetime'].year).rjust(15)\\\n + repr(events[i]['datetime'].julday).rjust(15) \\\n + repr(events[i]['datetime'].month).rjust(15) \\\n + repr(events[i]['datetime'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['datetime'].hour).rjust(15)\\\n + repr(events[i]['datetime'].minute).rjust(15) + \\\n repr(events[i]['datetime'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')\n \n 
quake_file.writelines(\\\n ' '*(15 - len('%.5f' % events[i]['latitude'])) + '%.5f' \\\n % events[i]['latitude'] + \\\n ' '*(15 - len('%.5f' % events[i]['longitude'])) + '%.5f' \\\n % events[i]['longitude'] + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % abs(events[i]['depth']))) + '%.5f' \\\n % abs(events[i]['depth']) + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len('%.5f' % abs(events[i]['magnitude']))) + '%.5f' \\\n % abs(events[i]['magnitude']) + '\\n')\n quake_file.writelines(\\\n ' '*(15 - len(events[i]['event_id'])) + \\\n events[i]['event_id'] + '-' + '\\n')\n \n quake_file.writelines(repr(events[i]['t1'].year).rjust(15)\\\n + repr(events[i]['t1'].julday).rjust(15) \\\n + repr(events[i]['t1'].month).rjust(15) \\\n + repr(events[i]['t1'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['t1'].hour).rjust(15)\\\n + repr(events[i]['t1'].minute).rjust(15) + \\\n repr(events[i]['t1'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')\n \n quake_file.writelines(repr(events[i]['t2'].year).rjust(15)\\\n + repr(events[i]['t2'].julday).rjust(15) \\\n + repr(events[i]['t2'].month).rjust(15) \\\n + repr(events[i]['t2'].day).rjust(15) + '\\n')\n quake_file.writelines(repr(events[i]['t2'].hour).rjust(15)\\\n + repr(events[i]['t2'].minute).rjust(15) + \\\n repr(events[i]['t2'].second).rjust(15) + \\\n repr(800).rjust(15) + '\\n')", "def _save_events_summary(self):\n for name, events in self._events.items():\n dict_events = [event.to_dict() for event in events]\n dump_data(dict_events, self._make_event_filename(name))", "def write(self, file_, format=format_, header=header): #@ReservedAssignment\n str_list = []\n if header and '{' in header:\n str_list.append(header.format(len(self)))\n if format == self.format_LATEX or format == self.format_LATEX2:\n for event in self:\n event.latex_datetime = str(event.datetime)[:19]\n event.latex_datetime = (event.latex_datetime[:10] + ' ' +\n event.latex_datetime[11:])\n for event in self:\n try:\n str_list.append(format.format(** event))\n except KeyError:\n event['origin_id'] = 102\n event['author'] = 'unknown'\n event['flynn_region'] = 'unknown'\n str_list.append(format.format(** event))\n #str_list.append(template.safe_substitute(event))\n output = ''.join(str_list)\n if file_:\n with open(file_, 'w') as f:\n f.write(output)\n log.info('Write events to file_ ' + file_)\n else:\n return output", "def process_events_optimised(self, events_chuck_size, data_chunk_size):\n ev = self.events\n tz = self.args[\"timezone\"]\n indexer = ev.Time.str.contains(\"\\d\\d:\\d\\d\", regex=True, na=False)\n timed_events, several_days_events = ev[indexer], ev[~indexer]\n \n if not several_days_events.empty:\n several_days_events.to_csv(\"special_events.csv\", index=False)\n self.log(\"[+] Special events were saved into standalone CSV-file\")\n else:\n self.log(\"[!] Special events not found\")\n\n self.data = pd.read_csv(self.args[\"data\"],\n iterator=True, chunksize=data_chunk_size)\n\n self.log(\"[.] 
Events and data linking...\")\n\n start, end = 0, events_chuck_size\n relevant_dates = pd.DataFrame()\n count = 1\n while True:\n events_slice = timed_events.iloc[start:end]\n # TODO: remove in release version\n # events_slice.to_csv('slice_{}_{}.csv'.format(start, end),\n # index=False)\n\n if events_slice.empty:\n break\n\n first_date, first_time = events_slice[['Date', 'Time']].iloc[0]\n lower_bound = convert(first_date + \" \" + first_time, mode='date')\n lower_bound += timedelta(hours=tz, minutes=-1)\n\n last_date, last_time = events_slice[['Date', 'Time']].iloc[-1]\n upper_bound = convert(last_date + \" \" + last_time, mode='date')\n upper_bound += timedelta(hours=tz, minutes=5)\n \n self.log(\"[.] Events slice bounded by [%s; %s] is in processing...\",\n lower_bound, upper_bound)\n \n for chunk in self.data:\n bounds = (lower_bound, upper_bound)\n linked, rest = self._process_chuck(\n chunk, bounds, events_slice, relevant_dates)\n\n relevant_dates = rest\n\n if linked is None:\n if relevant_dates.empty:\n err = \"[!] Warning: events from %d to %d have no data\"\n self.log(err, start + 1, end)\n break\n else:\n continue\n\n if linked.empty:\n err = \"[!] Warning: linked dataframe is empty\"\n self.log(err, severe=True)\n continue\n\n self.log(\"[+] Events from %d to %d were linked. \"\n \"Dataframe size: %d\", start + 1, end, linked.shape[0])\n\n filename = 'linked_events_{}_to_{}.csv'.format(start + 1, end)\n filename = os.path.join(self.args[\"output_folder\"], filename)\n linked.to_csv(filename, index=False)\n linked = pd.DataFrame()\n break\n\n count += 1\n start = end\n end += events_chuck_size", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def events(self):\n self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'", "def write_to_file(train_file, test_file, log_dict):\n i = 0\n train_events = []\n test_events = []\n\n for key in log_dict:\n trace = log_dict[key]\n if random.randint(0,1) == 0: # Add file to training set with 50% chance\n for e_idx in range(len(trace)):\n train_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Add file to test set\n if random.randint(0,100) > 50: # No anomaly injection with 50% chance\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",0,None\")\n else: # Anomaly injection\n trace, types = introduce_anomaly(trace, single=False)\n for e_idx in range(len(trace)):\n test_events.append(\",\".join([str(x) for x in trace[e_idx]]) + \",\" + str(key) + \",1,\\\"\" + str(types) + \"\\\"\")\n\n with open(train_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in train_events:\n fout.write(e + \"\\n\")\n\n with open(test_file, \"w\") as fout:\n fout.write(\",\".join([\"Time\", \"Activity\", \"Resource\", \"Weekday\", \"Case\", \"Anomaly\", \"Type\"]) + \"\\n\")\n for e in test_events:\n fout.write(e + \"\\n\")", "def export_string_events(self):\n\n # ask user observations to analyze\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations, maxTime=0,\n flagShowIncludeModifiers=True,\n flagShowExcludeBehaviorsWoEvents=False)\n\n if not 
plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n fn = QFileDialog(self).getSaveFileName(self, \"Export events as strings\", \"\",\n \"Events file (*.txt *.tsv);;All files (*)\")\n fileName = fn[0] if type(fn) is tuple else fn\n\n if fileName:\n\n response = dialog.MessageDialog(programName, \"Include observation(s) information?\", [YES, NO])\n\n try:\n with open(fileName, \"w\", encoding=\"utf-8\") as outFile:\n for obsId in selectedObservations:\n # observation id\n outFile.write(\"\\n# observation id: {}\\n\".format(obsId))\n # observation descrition\n outFile.write(\"# observation description: {}\\n\".format(\n self.pj[OBSERVATIONS][obsId][\"description\"].replace(os.linesep, \" \")))\n # media file name\n if self.pj[OBSERVATIONS][obsId][TYPE] in [MEDIA]:\n outFile.write(\"# Media file name: {0}{1}{1}\".format(\", \".join([os.path.basename(x)\n for x in\n self.pj[OBSERVATIONS]\n [obsId]\n [FILE][PLAYER1]]),\n os.linesep))\n if self.pj[OBSERVATIONS][obsId][TYPE] in [LIVE]:\n outFile.write(\"# Live observation{0}{0}\".format(os.linesep))\n\n # independent variables\n if \"independent_variables\" in self.pj[OBSERVATIONS][obsId]:\n outFile.write(\"# Independent variables\\n\")\n\n # rows.append([\"variable\", \"value\"])\n for variable in self.pj[OBSERVATIONS][obsId][\"independent_variables\"]:\n outFile.write(\"# {0}: {1}\\n\".format(variable,\n self.pj[OBSERVATIONS][obsId][\n \"independent_variables\"][variable]))\n outFile.write(\"\\n\")\n\n # selected subjects\n for subj in plot_parameters[\"selected subjects\"]:\n if subj:\n subj_str = \"\\n# {}:\\n\".format(subj)\n else:\n subj_str = \"\\n# No focal subject:\\n\"\n outFile.write(subj_str)\n\n out = self.create_behavioral_strings(obsId, subj, plot_parameters)\n if out:\n outFile.write(out + \"\\n\")\n\n except:\n logging.critical(sys.exc_info()[1])\n QMessageBox.critical(None, programName, str(sys.exc_info()[1]), QMessageBox.Ok | QMessageBox.Default,\n QMessageBox.NoButton)", "def collect_events(namespace, output_dir, k8s_cli, mode=MODE_RESTRICTED):\n if mode != MODE_ALL:\n logger.warning('Cannot collect events in \"restricted\" mode - skipping events collection')\n return\n # events need -n parameter in kubectl\n if not namespace:\n logger.warning(\"Cannot collect events without namespace - \"\n \"skipping events collection\")\n return\n cmd = \"{} get events -n {} -o wide\".format(k8s_cli, namespace)\n collect_helper(output_dir, cmd=cmd,\n file_name=\"events\", resource_name=\"events\", namespace=namespace)\n\n # We get the events in YAML format as well since in YAML format they are a bit more informative.\n output = run_get_resource_yaml(namespace, \"Event\", k8s_cli)\n with open(os.path.join(output_dir, \"Event.yaml\"), \"w+\", encoding='UTF-8') as file_handle:\n file_handle.write(output)", "def generate(start_date, episodes, steps, output_file):\n header = ','.join(FIELDS) + '\\n'\n with open(output_file, 'w') as fd:\n fd.write(header)\n data_arrays = []\n first_dp = generate_data_point(start_date)\n data_arrays.append(first_dp)\n\n interval = int(1440/steps)\n cur_ts = increment_ts(start_date, interval)\n\n while step_diff(start_date, cur_ts, interval) < steps*episodes:\n dp_tmp = generate_data_point(cur_ts)\n data_arrays.append(dp_tmp)\n cur_ts = increment_ts(cur_ts, interval)\n\n for dp in data_arrays:\n row = ','.join(dp) + '\\n'\n fd.write(row)", "def output_files(self):\n o = []\n if 'unweighted' in self.event_types:\n o.append(self.name + \"_unweighted_events.lhe.gz\")\n if 
'weighted' in self.event_types:\n o.append(self.name + \"_events.lhe.gz\")\n return o", "def test_log_filenames_multiple_date_in_past(self):\n time_lower = datetime.datetime.now() - datetime.timedelta(seconds=7210)\n time_upper = time_lower + datetime.timedelta(seconds=20)\n (tracks, statuses) = self.app.log_filenames(\n [self.track_path('silence.mp3')]*5,\n timestamp='2 hours ago'\n )\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = self.get_track_by_id(track.pk)\n track_objs.append(track_obj)\n self.assertGreaterEqual(track_obj['timestamp'], time_lower)\n self.assertLess(track_obj['timestamp'], time_upper)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])", "def split_timeseries_and_save(self, window_length=45, zero_padding=True, tmp_dir=os.path.join(\"..\", \"..\", \"data\", \"interim\")):\n #TODO: split from task event file\n\n label_df = pd.DataFrame(columns=[\"label\", \"filename\"])\n out_file = os.path.join(tmp_dir, \"{}_{:03d}.npy\")\n\n # Split the timeseries\n for ii in range(len(self.valid_ts_filepaths)):\n ts = self.get_valid_timeseries([ii])[0]\n ts_duration = ts.shape[0]\n rem = ts_duration % window_length\n if rem == 0:\n n_splits = int(ts_duration / window_length)\n else:\n if zero_padding:\n n_splits = np.ceil(ts_duration / window_length)\n pad_size = int(n_splits*window_length - ts_duration)\n pad_widths = [(0, pad_size), (0, 0)]\n ts = np.pad(ts, pad_width=pad_widths)\n else:\n ts = ts[:(ts_duration-rem), :]\n n_splits = np.floor(ts_duration / window_length)\n split_ts = np.split(ts, n_splits)\n\n # tmp = [split_timeseries(t,n_timepoints=n_timepoints) for t in timeseries]\n # for ts in tmp:\n # split_ts = split_ts + ts\n\n # #keep track of the corresponding labels\n # n = int(timeseries[0].shape[0]/n_timepoints)\n # split_labels = []\n # for l in labels:\n # split_labels.append(np.repeat(l,n))\n\n # #add a label for each split\n # split_labels.append(list(range(n))*len(timeseries))\n # return split_ts, split_labels", "def main():\n\n f = open(eventsfile, 'r')\n lines = f.readlines()\n numcounter = 0\n counter = 0\n fullcounter = 0\n movielist = []\n movielists =[]\n timestamp_list = []\n filteredlist = [] \n startdate = \"2020-02-26\"\n \n for line in lines:\n TAPES = line.split('\\t')\n if int(TAPES[2]) == 1 or int(TAPES[2]) == 2:\n filteredlist.append(line)\n \n for newline in filteredlist:\n TAPES = newline.split('\\t')\n fullcounter +=1\n if int(TAPES[2]) == 2:\n timestamp_list.append(0)\n continue\n startdate2 = startdate.split(\"-\")[1] + \"/\" + startdate.split(\"-\")[2] + \"/\" + startdate.split(\"-\")[0]\n dateplustime = startdate2 + TAPES[0][0:len(TAPES[0])]\n thistime = faststrptime(dateplustime)\n unixtimestamp = datetime.datetime.timestamp(thistime)\n timestamp_list.append(int(unixtimestamp))\n\n i = 0 \n for element in timestamp_list:\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+(counter-i)]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n \n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue \n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+1]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n\n if 
len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue\n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n counter += 1\n numcounter += 1\n if element != 0:\n movielist.append(counter)\n i += 1\n \n if numcounter == 30:\n numcounter = 0\n movielists.append(movielist)\n movielist = []\n \n if i > (len(timestamp_list)-1):\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n \n numendlists = counter - fullcounter\n first = len(movielists)-numendlists\n last = len(movielists)\n del movielists[first:last]\n \n for x in movielists:\n for y in x:\n if int(filenumber) == y:\n movielist = x\n\n modename = str(movielist[0]) + \"to\" + str(movielist[len(movielist)-1])\n modefilename = \"mode_\" + modename + \".png\"\n try:\n imread(modefilename)\n except:\n imageMode(modename,movielist)\n\n e = loadmodeImage(modefilename)\n \n roimask = np.zeros((ydim,xdim))\n f = open(roisfile, 'r')\n lines = f.readlines()\n i = 1\n i2 = 0\n for line in lines:\n try:\n print(int(line.split(' ')[0]))\n except ValueError:\n i2 += 1\n continue\n minx = int(line.split(' ')[0])\n miny = int(line.split(' ')[1])\n maxx = int(line.split(' ')[2])\n maxy = int(line.split(' ')[3])\n roimask[int(miny):int(maxy),int(minx):int(maxx)] = i\n i += 1\n numberofwells = i-1\n numberofcols = int(i2/2)\n numberofrows = int(numberofwells/numberofcols)\n roimaskweights = convertMaskToWeights(roimask)\n\n cap = cv2.VideoCapture(videoStream)\n\n cap.set(3,roimask.shape[1])\n cap.set(4,roimask.shape[0])\n \n ret,frame = cap.read()\n storedImage = np.array(e * 255, dtype = np.uint8)\n storedMode = Blur(storedImage)\n storedFrame = grayBlur(frame)\n cenData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))*2 -2])\n pixData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))])\n i = 0;\n totalFrames = 0\n while(cap.isOpened()):\n ret,frame = cap.read()\n if ret == False:\n break\n currentFrame = grayBlur(frame)\n diffpix = diffImage(storedFrame,currentFrame,pixThreshold)\n diff = trackdiffImage(storedMode,currentFrame,pixThreshold)\n diff.dtype = np.uint8\n contours,hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n MIN_THRESH = 20.0\n MIN_THRESH_P = 20.0\n roi_dict = {}\n for r in range(0,numberofwells):\n roi_dict[r+1] = []\n for cs in range(0,len(contours)):\n if cv2.contourArea(contours[cs]) < 1.0:\n continue\n if cv2.arcLength(contours[cs],True) < 1.0:\n continue\n if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs],True) > MIN_THRESH_P:\n M = cv2.moments(contours[cs])\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n area = cv2.contourArea(contours[cs])\n perim = cv2.arcLength(contours[cs],True)\n if int(roimask[cY,cX]) == 0:\n continue\n if not roi_dict[int(roimask[cY,cX])]:\n roi_dict[int(roimask[cY,cX])].append((area*perim,cX,cY))\n else:\n if roi_dict[int(roimask[cY,cX])][0][0] < area*perim:\n roi_dict[int(roimask[cY,cX])][0] = (area*perim,cX,cY)\n\n pixcounts = []\n pixcounts = np.bincount(roimaskweights, weights=diffpix.ravel())\n pixData[i,:] = np.hstack((pixcounts))\n counts = []\n keys = roi_dict.keys()\n keys = sorted(keys)\n for k in keys:\n x = -10000\n y = -10000\n if roi_dict[k]:\n x = roi_dict[k][0][1]\n y = roi_dict[k][0][2]\n counts.append(x)\n counts.append(y)\n cv2.line(storedImage,(x,y),(x,y),(255,255,255),2)\n if i == 284:\n cv2.imwrite(videoStream + '_trackedimagewithlines_' + 
str(i) + \".png\", storedImage)\n cenData[i,:] = np.asarray(counts)\n totalFrames += 1\n storedFrame = currentFrame\n i += 1\n\n file = open(videoStream + \".centroid2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells*2):\n file.write(str(int(cenData[x,:][y])) + '\\n')\n pixData = pixData[:i,:]\n pixData = pixData[:,1:] \n file = open(videoStream + \".motion2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells):\n file.write(str(int(pixData[x,:][y])) + '\\n')\n\n cap.release()\n cv2.destroyAllWindows()\n \n try:\n image = Image.open('lastframe.png')\n except:\n makenumROIsimage()", "def sample_times():\n\tthe_times = []\n\tday = config.window_start_date\n\twhile day <= config.window_end_date:\n\t\t# times from start of window on day to end of window \n\t\ttime = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_start_time \n\t\t) )\n\t\tend_time = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_end_time \n\t\t) )\n\t\twhile time < end_time: # While still in the time window\n\t\t\tthe_times.append( time )\n\t\t\ttime += timedelta(minutes=1)\n\t\tday += timedelta(days=1)\n\treturn the_times", "def get_tagged_events():\n\n f = open('event_info.txt', 'w+')\n f.write('')\n f.close()\n\n for category in MEETUP_TAGS:\n events_added = 0\n days = 5\n while events_added < NUM_EVENTS:\n\n urls = set()\n\n today = datetime.date.today()\n tomorrow = today\n\n tomorrow = tomorrow + datetime.timedelta(days=days)\n\n # https://www.meetup.com/find/events/arts-culture/?allMeetups=false&radius=5&userFreeform=New+York%2C+NY&mcId=z10025&month=4&day=20&year=2018&eventFilter=all\n\n url = 'www.meetup.com/find/events/{}/?allMeetups=true&radius=20 \\\n &userFreeform=New+York%2C+NY&mcId=c10001&mcName=New+York%2C+NY \\\n &month={}&day={}&year={}'.format(category,\n tomorrow.month,\n tomorrow.day,\n tomorrow.year)\n\n r = requests.get('https://' + url)\n print('https://' + url)\n data = r.text\n soup = BeautifulSoup(data)\n\n for link in soup.find_all('a'):\n href = link.get('href')\n if '/events/' in href and '/find/' not in href:\n urls.add(href)\n\n if not urls:\n break\n\n for url in urls:\n os.system('python retrieval.py ' + url + ' ' + category)\n events_added += 1\n if events_added > NUM_EVENTS:\n break\n\n print('Finished ' + str(days))\n days += 1", "def main():\n\n # parses arguments\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', action='store', dest='start_index', type=int,\n help='The starting index for events. Default is 0')\n\n parser.add_argument('-e', action='store', dest='end_index', type=int,\n help='The starting index for events. 
Default is 5,000')\n\n results = parser.parse_args()\n\n start_index = results.start_index or 0\n\n end_index = results.end_index or 5000\n\n scraper = Scraper()\n\n # these are the event column titles from the sample import csv given by localist\n event_column_titles = [\n 'Title','Description','Date From','Date To','Recurrence','Start Time','End Time',\n 'Location','Address','City','State','Event Website','Room','Keywords','Tags',\n 'Photo URL','Ticket URL','Cost','Hashtag','Facebook URL','Group','Department',\n 'Allow User Activity','Allow User Attendance','Visibility','Featured Tabs',\n 'Sponsored','Venue Page Only','Exclude From Trending','Event Types','Invited Audience', 'Original URL',\n 'Location Details'\n ]\n\n out_stream = open('event_import.csv', 'w')\n\n writer = Writer(event_column_titles, out_stream)\n\n writer.write_headers()\n\n # iterates through the specified event numbers and scrapes each one and writes\n # it to the output file\n for i in range(start_index, end_index + 1):\n current_url = 'http://test-ucscevents.pantheonsite.io/event/' + str(i)\n print(\"processing url: \" + current_url)\n r = requests.get(current_url)\n if r.status_code != requests.codes.ok:\n print(' 404')\n else:\n soup = get_soup_from_url(current_url)\n events = scraper.scrape_event(soup)\n for event in events:\n event['Original URL'] = current_url\n\n writer.write_object(event) # event written to output file here\n\n out_stream.close()", "def export_events_ioe(self):\n for event in self.positive_ids:\n pos_trans = ','.join(sorted(self.positive_ids[event]))\n all_trans = ','.join(list(set(sorted(self.positive_ids[event] + self.negative_ids[event]))))\n full_event = '{};{}:{}'.format(self.gene.name, self.etype, event)\n\n yield ('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(self.gene.chr, self.gene.name, full_event,\n pos_trans, all_trans),\n self.etype)", "def formatEvents(eventList):\r\n firstEv = True\r\n for ev in eventList:\r\n evStr = json.dumps(ev)\r\n xmlStr = \"<event><data>%s</data></event>\" % xml.sax.saxutils.escape(evStr)\r\n print xmlStr\r\n if (firstEv):\r\n firstEv = False\r\n logging.info(\"cphalo: first event in batch: %s\" % xmlStr)", "def get_Events(input, request):\n \n t_event_1 = datetime.now()\n \n global events\n \n Period = input['min_date'].split('T')[0] + '_' + \\\n input['max_date'].split('T')[0] + '_' + \\\n str(input['min_mag']) + '_' + str(input['max_mag'])\n eventpath = os.path.join(input['datapath'], Period)\n \n if os.path.exists(eventpath) == True:\n print '--------------------------------------------------------'\n \n if raw_input('Folder for requested Period:' + '\\n' + \\\n str(eventpath) + \\\n '\\n' + 'exists in your directory.' + '\\n\\n' + \\\n 'You could either:' + '\\n' + 'N: Close the program and try the ' + \\\n 'updating mode.' + '\\n' + \\\n 'Y: Remove the tree, continue the program ' + \\\n 'and download again.' + \\\n '\\n\\n' + 'Do you want to continue? 
(Y/N)' + '\\n') == 'Y':\n print '--------------------------------------------------------'\n shutil.rmtree(eventpath)\n os.makedirs(eventpath)\n \n else:\n print '--------------------------------------------------------'\n print 'So...you decided to update your folder...Ciao'\n print '--------------------------------------------------------'\n sys.exit()\n \n else:\n os.makedirs(eventpath)\n \n events = events_info(request)\n \n os.makedirs(os.path.join(eventpath, 'EVENT'))\n len_events = len(events)\n \n print 'Length of the events found based on the inputs: ' + \\\n str(len_events) + '\\n'\n \n for i in range(0, len_events):\n print \"Event No:\" + \" \" + str(i+1)\n print \"Date Time:\" + \" \" + str(events[i]['datetime'])\n print \"Depth:\" + \" \" + str(events[i]['depth'])\n print \"Event-ID:\" + \" \" + events[i]['event_id']\n try:\n print \"Flynn-Region:\" + \" \" + events[i]['flynn_region']\n except Exception, e:\n print \"Flynn-Region:\" + \" \" + \"NONE\"\n print \"Latitude:\" + \" \" + str(events[i]['latitude'])\n print \"Longitude:\" + \" \" + str(events[i]['longitude'])\n print \"Magnitude:\" + \" \" + str(events[i]['magnitude'])\n print \"-------------------------------------------------\"\n \n Event_cat = open(os.path.join(eventpath, 'EVENT', 'EVENT-CATALOG'), 'a+')\n Event_cat.writelines(str(Period) + '\\n')\n Event_cat.writelines('-------------------------------------' + '\\n')\n Event_cat.writelines('Information about the requested Events:' + '\\n\\n')\n Event_cat.writelines('Number of Events: ' + str(len_events) + '\\n')\n Event_cat.writelines('min datetime: ' + str(input['min_date']) + '\\n')\n Event_cat.writelines('max datetime: ' + str(input['max_date']) + '\\n')\n Event_cat.writelines('min magnitude: ' + str(input['min_mag']) + '\\n')\n Event_cat.writelines('max magnitude: ' + str(input['max_mag']) + '\\n')\n Event_cat.writelines('min latitude: ' + str(input['evlatmin']) + '\\n')\n Event_cat.writelines('max latitude: ' + str(input['evlatmax']) + '\\n')\n Event_cat.writelines('min longitude: ' + str(input['evlonmin']) + '\\n')\n Event_cat.writelines('max longitude: ' + str(input['evlonmax']) + '\\n')\n Event_cat.writelines('min depth: ' + str(input['min_depth']) + '\\n')\n Event_cat.writelines('max depth: ' + str(input['max_depth']) + '\\n')\n Event_cat.writelines('-------------------------------------' + '\\n\\n')\n Event_cat.close()\n \n \n for j in range(0, len_events):\n Event_cat = open(os.path.join(eventpath, 'EVENT', 'EVENT-CATALOG'), 'a')\n Event_cat.writelines(\"Event No: \" + str(j) + '\\n')\n Event_cat.writelines(\"Event-ID: \" + str(events[j]['event_id']) + '\\n')\n Event_cat.writelines(\"Date Time: \" + str(events[j]['datetime']) + '\\n')\n Event_cat.writelines(\"Magnitude: \" + str(events[j]['magnitude']) + '\\n')\n Event_cat.writelines(\"Depth: \" + str(events[j]['depth']) + '\\n')\n Event_cat.writelines(\"Latitude: \" + str(events[j]['latitude']) + '\\n')\n Event_cat.writelines(\"Longitude: \" + str(events[j]['longitude']) + '\\n')\n \n try:\n Event_cat.writelines(\"Flynn-Region: \" + \\\n str(events[j]['flynn_region']) + '\\n')\n \n except Exception, e:\n Event_cat.writelines(\"Flynn-Region: \" + 'None' + '\\n')\n \n Event_cat.writelines('-------------------------------------' + '\\n')\n Event_cat.close()\n \n Event_file = open(os.path.join(eventpath, 'EVENT', 'event_list'), 'a+')\n pickle.dump(events, Event_file)\n Event_file.close()\n \n print 'Events are saved!'\n \n print 'Length of events: ' + str(len_events) + '\\n'\n \n t_event_2 = 
datetime.now()\n t_event = t_event_2 - t_event_1\n \n print 'Time for getting and saving the events:'\n print t_event\n \n return events", "def filter_windows(sliding_windows_file, genes_file, output_file):\n\n\t# Read sliding windows file and create a list in the form\n\t# genes = [('gene1', 1000, 2000), ('gene2', 4000, 45000)]\n\tgenes = []\t\t# this could be a dictionary but I prefer not\n\tfor line in genes_file:\n\t\tline = line.strip()\n\n\t\tif line and not line.startswith('#'):\t\t# if line is not empty and not a comment\n#\t\tif line and re.match('\\d+', line):\n\t\t\tlogging.debug((\"line: %s\" %line))\n\t\t\tfields = line.split()\t\t# it is better to use the default splitting algorithm here.\n\t\t\t\t\t\t\t\t\t\t# read help(''.split)\t\n\n\t\t\tgene_name = fields[0]\n\t\t\tlogging.debug((\"fields: %s\" %fields))\n\t\t\tstart = int(fields[2])\n\t\t\tend = int(fields[3].strip())\t\t# remove \\n\\r, like chomp\n\t\t\tgenes.append((gene_name, start, end))\n\t\t\t\n#\tlogging.debug((\"genes :\", genes))\t\t# print the contents of genes, if level=loggin.DEBUG\n\n\t# read sliding windows file, and select windows that fall in genes\n\toutput = '#gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score\\n'\n\toutputlineskeleton = \"%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%s\\t%s\\t%s\\n\"\t# %(gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\n\tfor line in sliding_windows_file:\n\t\tline = line.strip()\t\t# remove trailing characters (like chomp)\n\t\tif line and not line.startswith('#'):\n\t\t\twindow_fields = line.split()\n\n#\t\t\tlogging.debug(window_fields)\n\t\t\twindow_start = int(window_fields[0])\n\t\t\twindow_middle = int(window_fields[2])\n\t\t\twindow_end = int(window_fields[1])\n#\t\t\tgene = window_fields[3]\n\t\t\tpopulation = window_fields[4]\n\t\t\tnumber = window_fields[5]\n\t\t\tscore = window_fields[6]\n\n\t\t\tfor gene in genes:\n\t\t\t\tgene_start = int(gene[1])\n\t\t\t\tgene_end = int(gene[2])\n\t\t\t\tgene_name = gene[0]\n\t\t\t\t# if window_start is comprised between gene_end and gene_start\n\t\t\t\tif gene_end > window_start >= gene_start:\n\t\t\t\t\tlogging.debug(\"This window starts inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\t\t\telif gene_end >= window_end > gene_start:\n\t\t\t\t\tlogging.debug(\"This window ends inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\n\tlogging.debug(output)\n\toutput_file.write(output)\n\toutput_file.seek(0)\n\treturn output_file", "def generate_fire_time_series(self):\r\n self.fire_events =[]\r\n event = self.generate_fire_recurrence()\r\n end_event = event + 365.0\r\n self.fire_events.append([event, end_event])\r\n t = 0\r\n i = 0\r\n while t <= self.total_run_time:\r\n fire = self.generate_fire_recurrence()\r\n start_fire = self.fire_events[i][0] + (fire)\r\n end_fire = start_fire + (365.0) \r\n self.fire_events.append([start_fire, end_fire])\r\n t += end_fire\r\n i+=1", "def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = 
csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])", "def write_event_for_interval(self):\n\n if self.num_events_in_interval > 0:\n # We have some events, so compute the sensor values and labels from the window:\n sensor_vals_to_write = self.get_sensor_values_for_out_event()\n label_vals_to_write = self.get_label_vals_for_out_event()\n\n self.write_out_event(self.next_out_stamp, sensor_vals_to_write, label_vals_to_write)\n else:\n # We didn't have any events, so use the last one we saw, if any:\n if self.last_seen_input_event is not None:\n # We have a recently-seen event, so just use that, but update the stamp:\n repeated_last_seen_event = dict(self.last_seen_input_event)\n repeated_last_seen_event[self.stamp_field] = self.next_out_stamp\n\n self.out_data.write_row_dict(repeated_last_seen_event)\n else:\n # We don't have any events to write, so just skip this time\n pass", "def test_shotgun():\n events = [['Event', '2017-11-22T11:30:00-08:00', '2017-11-22T12:10:00-08:00'],\n ['Event', '2017-11-22T12:00:00-08:00', '2017-11-22T13:00:00-08:00'],\n ['Event', '2017-11-22T12:30:00-08:00', '2017-11-22T13:30:00-08:00'],\n ['Event', '2017-11-23T10:00:00-08:00', '2017-11-23T11:20:00-08:00'],\n ['Event', '2017-11-23T14:00:00-08:00', '2017-11-23T15:00:00-08:00'],\n ['Event', '2017-11-24T14:30:00-08:00', '2017-11-25T19:00:00-08:00'],\n ['Event', '2017-11-25T12:00:00-08:00', '2017-11-25T13:00:00-08:00'],\n ['Event', '2017-11-26T11:30:00-08:00', '2017-11-26T12:10:00-08:00'],\n ['Event', '2017-11-26T12:30:00-08:00', '2017-11-26T13:30:00-08:00'],\n ['Event', '2017-11-28T10:00:00-08:00', '2017-11-28T11:20:00-08:00'],\n ['Event', '2017-11-28T12:00:00-08:00', '2017-11-28T13:00:00-08:00'],\n ['Event', '2017-11-28T14:00:00-08:00', '2017-11-28T15:00:00-08:00']]\n\n freetimes, _ = free(events, 9, 0, 17, 0, day_range, 30)\n fmt_freetime = output_format(freetimes)\n print(fmt_freetime)\n for i in fmt_freetime:\n print(i)\n assert fmt_freetime == ['Tue, Nov 21, 9:00 am to Tue, Nov 21, 5:00 pm.',\n 'Wed, Nov 22, 9:00 am to Wed, Nov 22, 11:30 am.',\n 'Wed, Nov 22, 1:30 pm to Wed, Nov 22, 5:00 pm.',\n 'Thu, Nov 23, 9:00 am to Thu, Nov 23, 10:00 am.',\n 'Thu, Nov 23, 11:20 am to Thu, Nov 23, 2:00 pm.',\n 'Thu, Nov 23, 3:00 pm to Thu, Nov 23, 5:00 pm.',\n 'Fri, Nov 24, 9:00 am to Fri, Nov 24, 2:30 pm.',\n 'Sun, Nov 26, 9:00 am to Sun, Nov 26, 11:30 am.',\n 'Sun, Nov 26, 1:30 pm to Sun, Nov 26, 5:00 pm.',\n 'Mon, Nov 27, 9:00 am to Mon, Nov 27, 5:00 pm.',\n 'Tue, Nov 28, 9:00 am to Tue, Nov 28, 10:00 am.',\n 'Tue, Nov 28, 11:20 am to Tue, Nov 28, 12:00 pm.',\n 'Tue, Nov 28, 1:00 pm to Tue, Nov 28, 2:00 pm.']", "def event50():\n header(50)\n skip_if_event_flag_on(86, 909)\n for chunk_start, chunk_end, initial_flag in (\n (1000, 1029, 1000),\n (1030, 1059, 1030),\n (1060, 1089, 1060),\n (1090, 1109, 1090),\n (1110, 1119, 1110),\n (1120, 1139, 1120),\n (1140, 1169, 1140),\n (1170, 1189, 1170),\n (1190, 1209, 1202),\n (1210, 1219, 1210),\n (1220, 1229, 1220),\n (1230, 1239, 1230),\n (1240, 1249, 1240),\n (1250, 1259, 1250),\n (1270, 1279, 1270),\n (1280, 1289, 1280),\n (1290, 1309, 1290),\n (1310, 1319, 1310),\n (1320, 1339, 1320),\n (1340, 1359, 1340),\n (1360, 1379, 1360),\n (1380, 1399, 1380),\n (1400, 1409, 1400),\n (1410, 1419, 1410),\n (1420, 1429, 1420),\n (1430, 1459, 
1430),\n (1460, 1489, 1460),\n (1490, 1539, 1490),\n (1540, 1569, 1540),\n (1570, 1599, 1570),\n (1600, 1619, 1600),\n (1620, 1639, 1620), # Trusty Patches\n (1640, 1669, 1640),\n (1670, 1679, 1670),\n (1690, 1699, 1690),\n (1700, 1709, 1700),\n (1710, 1729, 1710),\n (1760, 1769, 1760),\n (1770, 1779, 1770),\n (1780, 1789, 1780),\n (1820, 1839, 1820),\n (1840, 1859, 1840),\n (1860, 1869, 1860),\n (1870, 1889, 1870),\n (1890, 1899, 1890), # (New) Havel the Rock\n (11412020, 11412029, 11412020), # (New) Lost Daughter of Izalith\n (11412050, 11412059, 11412050), # (New) Xanthous King Jeremiah\n ):\n skip_if_event_flag_range_not_all_off(1, chunk_start, chunk_end)\n flag.enable(initial_flag)\n\n skip_if_event_flag_on(24, 909)\n for new_game_flag in range(11807020, 11807241, 10):\n if new_game_flag in (11807140, 11807150, 11807160):\n continue # not used\n flag.enable(new_game_flag)\n for new_game_flag_dlc in range(11217060, 11217091, 10):\n flag.enable(new_game_flag_dlc)\n\n # Some first-time initialization of flags (once per playthrough).\n end_if_event_flag_on(909)\n flag.enable(909) # NPC initialization is done for this playthrough.\n flag.enable(814) # Not sure.\n flag.enable(50006071) # Rhea won't drop Pendant.\n flag.enable(50006080) # Petrus won't drop Ivory Talisman (because he hasn't killed Rhea).\n flag.enable(50006771) # Jeremiah won't drop Chthonic Spark (because he didn't steal it yet).\n flag.enable(51300992) # Vamos won't drop Chthonic Spark (because he hasn't been given it yet).\n flag.enable(11607030) # Oswald won't sell Velka's Rapier (because you haven't made a pact with Velka yet).\n flag.enable(51100371) # Profane Ember drop is disabled (because you haven't made a pact with Velka yet).\n flag.enable(51300220) # Reaper's Rune won't appear.\n flag.enable(51300221) # Ransacker's Rune won't appear.\n\n # Disable Etched Ring pickup in starting cell unless it is NG+ and the player doesn't have one already.\n if_new_game_count_greater_than_or_equal(7, 1)\n if_player_does_not_have_ring(7, RING.EtchedRing)\n end_if_condition_true(7)\n flag.enable(51810001)", "def writeFile(self, name, folder, collected_entry_list=[]):\n file_io = open(os.path.join(folder, \"system_%s.csv\" % name), \"w\")\n csv_output = csv.writer(file_io)\n csv_output.writerow([\"time\", \"entry\"])\n for collected_entry in collected_entry_list:\n csv_output.writerow([collected_entry[\"time\"], collected_entry[\"entry\"]])\n file_io.close()", "def yield_chunked_events(self, events):\n for i in range(0, len(events), 5000):\n yield events[i:i + 5000]", "def test_overlap():\n events = [['Event', '2017-11-21T10:00:00-08:00', '2017-11-21T11:00:00-08:00'],\n ['Event', '2017-11-21T10:30:00-08:00', '2017-11-21T11:20:00-08:00']]\n freetimes, _ = free(events, 9, 0, 17, 0, day_range, 30)\n fmt_freetime = output_format(freetimes)\n for i in fmt_freetime:\n print(i)\n assert fmt_freetime == ['Tue, Nov 21, 9:00 am to Tue, Nov 21, 10:00 am.',\n 'Tue, Nov 21, 11:20 am to Tue, Nov 21, 5:00 pm.',\n 'Wed, Nov 22, 9:00 am to Wed, Nov 22, 5:00 pm.',\n 'Thu, Nov 23, 9:00 am to Thu, Nov 23, 5:00 pm.',\n 'Fri, Nov 24, 9:00 am to Fri, Nov 24, 5:00 pm.',\n 'Sat, Nov 25, 9:00 am to Sat, Nov 25, 5:00 pm.',\n 'Sun, Nov 26, 9:00 am to Sun, Nov 26, 5:00 pm.',\n 'Mon, Nov 27, 9:00 am to Mon, Nov 27, 5:00 pm.']", "def collect_events(helper, ew):\n\n '''\n # The following example writes a random number as an event. 
(Multi Instance Mode)\n # Use this code template by default.\n import random\n data = str(random.randint(0,100))\n event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n ew.write_event(event)\n '''\n\n '''\n # The following example writes a random number as an event for each input config. (Single Instance Mode)\n # For advanced users, if you want to create single instance mod input, please use this code template.\n # Also, you need to uncomment use_single_instance_mode() above.\n import random\n input_type = helper.get_input_type()\n for stanza_name in helper.get_input_stanza_names():\n data = str(random.randint(0,100))\n event = helper.new_event(source=input_type, index=helper.get_output_index(stanza_name), sourcetype=helper.get_sourcetype(stanza_name), data=data)\n ew.write_event(event)\n '''\n\n if helper.get_log_level() == \"DEBUG\":\n import traceback\n debug = True\n else:\n debug = False\n\n try:\n # Construct Workday client from the provided global config\n rest_api_endpoint = helper.get_global_setting(\"rest_api_endpoint\")\n token_endpoint = helper.get_global_setting(\"token_endpoint\")\n client_id = helper.get_global_setting(\"client_id\")\n client_secret = helper.get_global_setting(\"client_secret\")\n refresh_token = helper.get_global_setting(\"refresh_token\")\n\n empty_fields = []\n if not rest_api_endpoint:\n empty_fields.append(\"Workday REST API Endpoint\")\n if not token_endpoint:\n empty_fields.append(\"Token Endpoint\")\n if not client_id:\n empty_fields.append(\"Client ID\")\n if not client_secret:\n empty_fields.append(\"Client Secret\")\n if not refresh_token:\n empty_fields.append(\"Refresh Token\")\n if len(empty_fields) > 0:\n raise ValueError(\"Empty fields in global configuration: {}\".format(\", \".join(empty_fields)))\n\n wday = Workday(rest_api_endpoint, token_endpoint, client_id, client_secret, refresh_token, http_user_agent=USER_AGENT, helper=helper)\n except ValueError as e:\n helper.log_error(str(e))\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n sys.exit(1)\n\n stanza_names = helper.get_input_stanza_names()\n if not isinstance(stanza_names, list):\n stanza_names = [stanza_names]\n\n for stanza_name in stanza_names:\n input_type = helper.get_input_type()\n input_name = helper.get_arg(\"input_name\")\n include_target = helper.get_arg(\"include_target\")\n\n index = helper.get_output_index(stanza_name)\n sourcetype = \"workday:{}\".format(input_name)\n\n if input_name == \"user_activity\":\n\n # Pull checkpoint value and setup query range for this run\n # Only pull up to 5 minutes in the past to allow time for events to be available in the report\n checkpoint_format = \"%Y-%m-%dT%H:%M:%SZ\"\n end = datetime.datetime.utcnow() - datetime.timedelta(minutes=5)\n start = helper.get_check_point(input_name)\n if start is None:\n start = end\n helper.log_info(\"No timestamp checkpoint found for input \\\"{}\\\", starting from now ({})\".format(\n input_name,\n start.strftime(checkpoint_format)\n ))\n # Save current time now to preserve original start time in case of errors\n helper.save_check_point(input_name, end.strftime(checkpoint_format))\n\n else:\n # Confirm that the checkpoint is in the correct format\n try:\n start = datetime.datetime.strptime(start, checkpoint_format)\n except ValueError as e:\n helper.log_error(\"Invalid checkpoint value for input \\\"{}\\\", aborting ({})\".format(input_name, str(e)))\n continue\n\n\n helper.log_info(\"Starting input 
\\\"{}\\\" for window ({}, {})\".format(\n input_name,\n start.strftime(checkpoint_format),\n end.strftime(checkpoint_format)\n ))\n\n try:\n input_start = time.time()\n results = list(wday.audit_logs(start, end, include_target=include_target))\n\n except requests.exceptions.ConnectionError as e:\n helper.log_error(\"Unable to connect to host\")\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n except requests.exceptions.Timeout as e:\n helper.log_error(\"Request timed out, retries exhausted\")\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n except requests.exceptions.HTTPError as e:\n helper.log_error(\"Request failed with error code ({}), retries exhausted\".format(e.response.status_code))\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n except Exception as e:\n helper.log_error(\"Unknown exception occurred ({})\".format(str(e)))\n if debug: helper.log_debug(\"\".join(traceback.format_exc()))\n\n else:\n\n # Deliberately wait to write events until all are collected with no errors\n # otherwise errors or restarts could cause missing / duplicate events\n for result in results:\n event = helper.new_event(\n source = input_type,\n index = index,\n sourcetype = sourcetype,\n data = json.dumps(result)\n )\n ew.write_event(event)\n\n input_runtime = time.time() - input_start\n event_count = len(results)\n helper.log_info(\"Finished input \\\"{}\\\" for window ({}, {}) in {} seconds, {} events written\".format(\n input_name,\n start.strftime(checkpoint_format),\n end.strftime(checkpoint_format),\n round(input_runtime, 2),\n event_count\n ))\n\n helper.save_check_point(input_name, end.strftime(checkpoint_format))\n\n else:\n helper.log_warning(\"Invalid input \\\"{}\\\", supported values are \\\"{}\\\"\".format(input_name, \"|\".join(VALID_INPUTS)))", "def main():\n credentials = get_credentials()\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n max = 7\n events = getEvents(credentials, now, max)\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])\n #addEvent(credentials)", "def process(input_files):\n n_words_event = len(OUT_CONFIG['event']['fields'])\n n_words_hit = len(OUT_CONFIG[args.format]['fields'])\n # Initialising event\n event = -1\n G = Geometry(CONFIGURATION)\n H = HitManager()\n SLs = {}\n for iSL in config.SL_SHIFT.keys():\n SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL])\n # Defining which SLs should be plotted in which global view\n GLOBAL_VIEW_SLs = {\n 'xz': [SLs[0], SLs[2]],\n 'yz': [SLs[1], SLs[3]]\n }\n # Analyzing the hits in each event\n for file_path in input_files:\n # Reading input file line by line\n with open(file_path, 'r') as file_in:\n file_line_nr = 0\n for line in file_in:\n file_line_nr += 1\n if file_line_nr <= 1:\n continue\n hits_lst = []\n H.reset()\n words = line.strip().split()\n event = int(words[0])\n # Skipping event if it was not specified in command line\n if args.events is not None and event not in args.events:\n continue\n nhits = int(words[1])\n print('Event {0:<5d} # hits: {1:d}'.format(event, nhits))\n if args.glance:\n continue\n # Skipping event with too many hits (most likely a spark event that will take forever to process)\n if nhits > args.max_hits:\n continue\n # Extracting hit information\n for iHit in range(nhits):\n start = n_words_event + iHit*n_words_hit\n ww = words[start:start+n_words_hit]\n 
hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])])\n H.add_hits(hits_lst)\n # Removing hits with time outside the timebox region\n H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True)\n # Calculating local+global hit positions\n H.calc_pos(SLs)\n # Creating figures of the chambers\n figs = {}\n figs['sl'] = plot.book_chambers_figure(G)\n figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs)\n # Analyzing hits in each SL\n sl_fit_results = {}\n for iSL, sl in SLs.items():\n # print('- SL', iSL)\n hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer')\n if args.plot:\n # Drawing the left and right hits in local frame\n figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5,\n fill_color='red', fill_alpha=0.7, line_width=0)\n figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5,\n fill_color='blue', fill_alpha=0.7, line_width=0)\n # Performing track reconstruction in the local frame\n sl_fit_results[iSL] = []\n layer_groups = hits_sl.groupby('layer').groups\n n_layers = len(layer_groups)\n # Stopping if lass than 3 layers of hits\n if n_layers < config.NHITS_MIN_LOCAL:\n continue\n hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()]\n # Building the list of all possible hit combinations with 1 hit from each layer\n hits_layered = list(itertools.product(*hitid_layers))\n # Building more combinations using only either left or right position of each hit\n for hit_ids in hits_layered:\n # print('- -', hit_ids)\n posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values\n posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values\n posx_combs = list(itertools.product(*posx))\n # Fitting each combination\n fit_results_lr = []\n fit_range = (min(posz), max(posz))\n for iC, posx_comb in enumerate(posx_combs):\n pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range)\n chi2 = stats[0][0] / n_layers\n if chi2 < config.FIT_CHI2_MAX:\n a0, a1 = pfit\n fit_results_lr.append((chi2, hit_ids, pfit))\n # Keeping only the best fit result from the given set of physical hits\n fit_results_lr.sort(key=itemgetter(0))\n if fit_results_lr:\n sl_fit_results[iSL].append(fit_results_lr[0])\n # Sorting the fit results of a SL by Chi2\n sl_fit_results[iSL].sort(key=itemgetter(0))\n if sl_fit_results[iSL]:\n # Drawing fitted tracks\n posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32)\n for iR, res in enumerate(sl_fit_results[iSL][:5]):\n col = config.TRACK_COLORS[iR]\n posx = res[2](posz)\n figs['sl'][iSL].line(x=posx, y=posz,\n line_color=col, line_alpha=0.7, line_width=3)\n\n if args.plot:\n # Drawing the left and right hits in global frame\n for view, sls in GLOBAL_VIEW_SLs.items():\n sl_ids = [sl.id for sl in sls]\n hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)]\n figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]],\n fill_color='red', fill_alpha=0.7, line_width=0)\n figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]],\n fill_color='blue', fill_alpha=0.7, line_width=0)\n # Building 3D segments from the fit results in each SL\n posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32)\n for sl in sls:\n for iR, res in enumerate(sl_fit_results[sl.id][:5]):\n posx = res[2](posz)\n start = (posx[0], 0, posz[0])\n end = (posx[1], 0, posz[1])\n segL = Segment(start, end)\n segG = segL.fromSL(sl)\n 
segG.calc_vector()\n # Extending the global segment to the full height of the view\n start = segG.pointAtZ(plot.PLOT_RANGE['y'][0])\n end = segG.pointAtZ(plot.PLOT_RANGE['y'][1])\n # Getting XY coordinates of the global segment for the current view\n iX = COOR_ID[view[0]]\n posx = [start[iX], end[iX]]\n posy = [start[2], end[2]]\n # Drawing the segment\n col = config.TRACK_COLORS[sl.id]\n figs['global'][view].line(x=posx, y=posy,\n line_color=col, line_alpha=0.7, line_width=3)\n print(sl.id, iR, posx, posy)\n\n\n\n\n\n\n # Storing the figures to an HTML file\n if args.plot:\n plots = [[figs['sl'][l]] for l in [3, 1, 2, 0]]\n plots.append([figs['global'][v] for v in ['xz', 'yz']])\n bokeh.io.output_file(args.output.format(event), mode='cdn')\n bokeh.io.save(bokeh.layouts.layout(plots))", "def create_files_with_aggregates(df):\n # Save data grouped by title and channel\n df.groupby(['title', 'channel'])\\\n .size()\\\n .reset_index(name='counter')\\\n .sort_values(by=['counter'], ascending=False)\\\n .to_csv('views_by_title&channel.xlsx', index=False)\\\n\n\n # Views by channel\n df['channel'].value_counts().to_csv('views_by_channel.xlsx')\n\n # Views by day\n days = list()\n for t in df['time'].str.split('T'):\n # t[0] => day !!! t[1] => time\n days.append(t[0])\n df['day'] = days\n\n df.groupby(['day']).size().reset_index(name='counter').to_csv('views_by_day.xlsx', index=False)\n\n\n # Views by day of week\n df['day'] = pd.to_datetime(df['day'])\n df['day_of_week'] = df['day'].dt.day_name()\n df.groupby(['day_of_week']).size().reset_index(name='counter').to_csv('views_by_day_week.xlsx', index=False)\n\n create_plots(df)\n return df", "def _get_fsevent_files(self):\r\n # Print the header columns to the output files\r\n Output.print_columns(self.l_all_fsevents)\r\n\r\n # Total number of files in events dir #\r\n t_files = len(os.listdir(self.path))\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n t_files -= 1\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Run simple test to see if file mod dates\r\n # should be used to generate time ranges\r\n # In some instances fsevent files may not have\r\n # their original mod times preserved on export\r\n # This code will flag true when the same date and hour\r\n # exists for the first file and the last file\r\n # in the provided source fsevents folder\r\n first = os.path.join(self.path, os.listdir(self.path)[0])\r\n last = os.path.join(self.path, os.listdir(self.path)[len(os.listdir(self.path)) - 1])\r\n first = os.path.getmtime(first)\r\n last = os.path.getmtime(last)\r\n first = str(datetime.datetime.utcfromtimestamp(first))[:14]\r\n last = str(datetime.datetime.utcfromtimestamp(last))[:14]\r\n\r\n if first == last:\r\n self.use_file_mod_dates = False\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(self.all_files_count, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Full path to source fsevent file\r\n self.src_fullpath = os.path.join(self.path, filename)\r\n # Name of source fsevent file\r\n self.src_filename = filename\r\n # UTC mod date of source fsevent file\r\n self.m_time 
= os.path.getmtime(self.src_fullpath)\r\n self.m_time = str(datetime.datetime.utcfromtimestamp((self.m_time))) + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(self.src_fullpath, \"rb\")\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n # When permission denied is encountered\r\n if \"Permission denied\" in str(exp) and not os.path.isdir(self.src_fullpath):\r\n print('\\nEnsure that you have permissions to read '\r\n 'from {}\\n{}\\n'.format(self.path, str(exp)))\r\n sys.exit(0)\r\n # Otherwise write error to log file\r\n else:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_fullpath)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. 
Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)", "def save_event(msg, text):\n for e in msg.events:\n text.write(\"%i.%09i\\t%i\\t%i\\t%i\\n\" %( e.ts.secs, e.ts.nsecs, e.x, e.y, e.polarity+0))", "def bootstrap_events():\n import datetime\n import random\n # let's have each event occur once between 3 and 6 pm, \n # some day in the next month, and then recur for the next 1 to 8 weeks\n\n now = datetime.datetime.now().replace(minute=0, second=0)\n\n for program in Program.objects.all():\n start_hour = random.randint(15, 18)\n the_date = now.replace(hour=start_hour) + datetime.timedelta(random.randint(0, 31))\n duration = random.randint(1,6) * 30\n next_week = datetime.timedelta(7)\n\n program.events.add(EventDate.objects.create(\n date=the_date, \n duration_mins = duration))\n\n for next_occur in range(random.randint(1,8)):\n the_date += next_week\n program.events.add(EventDate.objects.create(\n date = the_date,\n duration_mins = duration))\n\n print \"Scheduled\", program.events.count(), \"events for\", program", "def log_event_to_file(event):\n with open('eventlogs/{}.json'.format(time.time()), 'w') as event_write:\n event_write.write(json_dumpstring(event))\n pass", "def print_event_list(event_list, my_file):\n\tlogger.debug(\"Outputting list of events to page...\")\n\tlogger.debug(\"List:\\n\" + str(event_list))\n\tfor event in event_list:\n\t\tprint >> my_file, \"\"\"<TR>\"\"\"\n\t\tprint_event(event, my_file)\n\t\tprint >> my_file, \"\"\"</TR>\"\"\"", "def create_file_output(self, results):\n for key, value in results.table_output.items():\n name_timestamp = key.split('&')\n _name = name_timestamp[0]\n timestamp = name_timestamp[1]\n file_name = output_file_prefix + \"-\" + _name + \".csv\"\n if file_name not in self.file_creation_set:\n self._header_written = False\n self.file_creation_set.update([file_name])\n for row in value:\n with open(file_name, 'a+') as file_to_write:\n row.update({'Timestamp': timestamp})\n _keys = row.keys()\n file_output = csv.DictWriter(file_to_write, _keys)\n if not self._header_written:\n file_output.writeheader()\n self._header_written = True\n file_output.writerow(row)\n file_to_write.close()\n return results", "def generate_fire_time_series(self):\n\n self.fire_events =[]\n event = self.generate_fire_recurrence()\n end_event = event + 365.0\n self.fire_events.append([event, end_event])\n t = 0\n i = 0\n while t <= self.total_run_time:\n fire = self.generate_fire_recurrence()\n start_fire = self.fire_events[i][0] + (fire)\n end_fire = start_fire + (365.0)\n self.fire_events.append([start_fire, end_fire])\n t += end_fire\n i+=1", "def AllindividualRuns():\n #800 nm\n RunData(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'), out='I800nm')\n RunData(getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/'), out='I800nm5k')\n RunData(getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/'), out='I800nm10k')\n RunData(getFiles(mintime=(15, 43, 24), maxtime=(15, 51, 47), folder='data/31Jul/'), out='I800nm20k')\n RunData(getFiles(mintime=(15, 56, 11), maxtime=(16, 02, 58), folder='data/31Jul/'), out='I800nm30k')\n RunData(getFiles(mintime=(16, 12, 39), maxtime=(16, 18, 25), folder='data/31Jul/'), out='I800nm38k')\n RunData(getFiles(mintime=(16, 21, 52), 
maxtime=(16, 26, 16), folder='data/31Jul/'), out='I800nm50k')\n RunData(getFiles(mintime=(16, 32, 02), maxtime=(16, 35, 23), folder='data/31Jul/'), out='I800nm54k')\n #700 nm\n RunData(getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'), out='I700nm5k')\n RunData(getFiles(mintime=(17, 37, 35), maxtime=(17, 46, 51), folder='data/30Jul/'), out='I700nm9k')\n RunData(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'), out='I700nm52k')\n RunData(getFiles(mintime=(17, 58, 18), maxtime=(17, 59, 31), folder='data/30Jul/'), out='I700nm32k')\n #600 nm\n RunData(getFiles(mintime=(15, 22, 00), maxtime=(15, 36, 32), folder='data/30Jul/'), out='I600nm5k')\n RunData(getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/'), out='I600nm54k')\n RunData(getFiles(mintime=(15, 52, 07), maxtime=(16, 06, 32), folder='data/30Jul/'), out='I600nm10k')\n #890 nm\n RunData(getFiles(mintime=(13, 37, 37), maxtime=(13, 50, 58), folder='data/01Aug/'), out='I890nm5k')\n RunData(getFiles(mintime=(14, 00, 58), maxtime=(14, 11, 54), folder='data/01Aug/'), out='I890nm10k')\n RunData(getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/'), out='I890nm30k')\n RunData(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'), out='I890nm50k')", "def build_custom_log(\n dp_shell_history: Path,\n fp_results: Path,\n *,\n daterange: List[str],\n username: str = None,\n wdir: Path = None,\n hostname: str = None,\n regexp: str = None,\n unique: bool = False,\n) -> None:\n dt_start, dt_end = get_daterange(daterange)\n\n log.trace(\"dt_start: {}\", dt_start) # type: ignore\n log.trace(\"dt_end: {}\", dt_end) # type: ignore\n\n hostname = os.uname().nodename if hostname is None else hostname\n regexp = \".*\" if regexp is None else regexp\n\n with fp_results.open(\"w\") as f:\n f.write(f\"# vim: filetype={SCRIPTNAME}\\n\\n\")\n\n dt_tmp = dt_start\n entry_count = 0\n while date_ym_value(dt_tmp) <= date_ym_value(dt_end):\n fp_log = Path(\n f\"{dp_shell_history}/{hostname}/{dt_tmp.year}/\"\n f\"{str(dt_tmp.month).zfill(2)}.log\"\n )\n\n try:\n if hostname.lower() == \"all\":\n fp_log = merge_hosts(\n dp_shell_history, dt_tmp.year, dt_tmp.month\n )\n\n skip_date_check = (\n dt_tmp.month != dt_start.month or dt_tmp.year != dt_start.year\n ) and (dt_tmp.month != dt_end.month or dt_tmp.year != dt_end.year)\n\n log_lines = process_logfile(\n fp_log,\n dt_start=dt_start,\n dt_end=dt_end,\n regexp=regexp,\n username=username,\n wdir=wdir,\n unique=unique,\n skip_date_check=skip_date_check,\n )\n\n with fp_results.open(\"a+\") as f:\n f.writelines(log_lines)\n\n entry_count += len(log_lines)\n except LogsNotFound:\n log.debug(f\"No Log Files for {dt_tmp.month}-{dt_tmp.year} Exist.\")\n finally:\n dt_tmp = dt_tmp + relativedelta(months=1)\n\n with fp_results.open(\"a+\") as f:\n f.write(\n f\"# Number of shell commands matched by {SCRIPTNAME} query: \"\n f\"{entry_count}\"\n )", "def _most_recent_event_files(self):\n regex = re.compile(r\"\\w*events.log\")\n return [\n os.path.join(self._output_dir, x)\n for x in os.listdir(self._output_dir)\n if regex.search(x)\n ]", "def _write_event(self, event, masses, pdg_codes):\n\n self.file.write(\"<event>\\n\")\n\n self.file.write(\n \"%i 0 -1.0000000E+00 -1.0000000E+00 -1.0000000E+00 -1.0000000E+00\\n\"\n % (self.n_particles)\n )\n\n for momentum, mass, pdg in zip(event, masses, pdg_codes):\n\n self._write_particle(momentum, mass, pdg)\n\n self.file.write(\"</event>\\n\")", "def testExportEvents(self):\n 
test_file_path = self._GetTestFilePath(['psort_test.plaso'])\n self._SkipIfPathNotExists(test_file_path)\n\n knowledge_base_object = knowledge_base.KnowledgeBase()\n\n test_file_object = io.StringIO()\n\n output_mediator_object = output_mediator.OutputMediator(\n knowledge_base_object, data_location=shared_test_lib.TEST_DATA_PATH)\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator_object.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_mediator_object.SetPreferredLanguageIdentifier('en-US')\n\n output_module = dynamic.DynamicOutputModule(output_mediator_object)\n output_module._file_object = test_file_object\n\n configuration = configurations.ProcessingConfiguration()\n\n storage_reader = storage_factory.StorageFactory.CreateStorageReaderForFile(\n test_file_path)\n\n test_engine = psort.PsortMultiProcessEngine()\n\n test_engine.ExportEvents(\n knowledge_base_object, storage_reader, output_module, configuration)\n\n output = test_file_object.getvalue()\n lines = output.split('\\n')\n\n self.assertEqual(len(lines), 22)\n\n expected_line = (\n '2014-11-18T01:15:43+00:00,'\n 'Content Modification Time,'\n 'LOG,'\n 'Log File,'\n '[---] last message repeated 5 times ---,'\n 'syslog,'\n 'OS:/tmp/test/test_data/syslog,'\n 'repeated')\n self.assertEqual(lines[14], expected_line)", "def create_ical_file(list_of_events, strasse, hausnummer):\n cal = Calendar()\n\n # Some properties are required to be compliant:\n cal.add('prodid', '-//My calendar product//mxm.dk//')\n cal.add('version', '2.0')\n\n global total_number_of_events\n total_number_of_events = len(list_of_events)\n\n all_ical_events = create_cal_events(list_of_events, strasse, hausnummer)\n for evnt in all_ical_events:\n # Add the event to the calendar:\n cal.add_component(evnt)\n\n cal_as_ical = cal.to_ical()\n create_folder_if_not_exists()\n # Write iCal file to disk\n return save_ical_file(cal_as_ical, get_filename(strasse, hausnummer))", "def pulsEphem(self):\n\n hduMain = fits.open(self.ft1)\n\n # --------------------------------------------------------------------------------------------- #\n # Split the FT1 file every 4000 events\n noEv = 0\n deltEv = 5000\n count = 0\n wfil = open(os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'), 'w')\n while noEv <= self.nevents:\n hduCols = []\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:noEv+deltEv], format=form, unit=uni) )\n # Updte the tstart and tstop in the header in order for tempo2 to work...\n hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:noEv+deltEv][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header) \n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n noEv += deltEv\n count += 1\n if noEv != self.nevents:\n hduCols = []\n noEv -= deltEv\n for colname, form, uni in zip(hduMain['EVENTS'].columns.names, hduMain['EVENTS'].columns.formats, hduMain['EVENTS'].columns.units):\n hduCols.append( fits.Column(name=colname, array=hduMain['EVENTS'].data[colname][noEv:self.nevents], format=form, unit=uni) )\n 
hduMain['EVENTS'].header.set('TSTART', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][0])\n hduMain['EVENTS'].header.set('TSTOP', hduMain['EVENTS'].data['TIME'][noEv:self.nevents][-1])\n newHDU = fits.BinTableHDU.from_columns(hduCols, name='EVENTS', header=hduMain['EVENTS'].header)\n hdulist = fits.HDUList([hduMain['PRIMARY'], newHDU, hduMain['GTI']])\n tmpName = os.path.dirname(self.ft1)+os.path.basename('tempFT1_'+str(count)+'.fits')\n hdulist.writeto(tmpName, clobber=True)\n wfil.write(tmpName + '\\n')\n wfil.close()\n\n hduMain.close()\n\n # --------------------------------------------------------------------------------------------- #\n # Run tempo2 for each piece of the FT1\n rfil = open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r')\n percent = 0\n nbFiles = sum(1 for line in open(os.path.dirname(self.ft1) + 'tmpFT1.lis', 'r'))\n count = 0\n for tmpFil in rfil:\n # Print a progression bar every 5%\n if ( count / np.floor(nbFiles) * 100 ) >= percent:\n self._progressBar(percent, printEvery=5)\n percent += 5\n with open(os.devnull, 'wb') as devnull:\n subprocess.check_call(['/dsm/fermi/fermifast/glast/tempo2-2013.9.1/tempo2',\n '-gr', 'fermi', '-ft1', tmpFil[:-1], '-ft2', self.ft2, '-f', self.ephem,\n '-phase'], stdout=devnull, stderr=subprocess.STDOUT)\n count += 1\n # Replace the old ft1 by the new one with the PULSE_PHASE column\n #os.remove()\n self._gtSelect(data = os.path.dirname(self.ft1) + os.path.basename('tmpFT1.lis'))\n\n\n\n\n #self.nevents\n #J2032+4127_54683_57791_chol_pos.par\n #os.popen(\"tempo2 -gr fermi -ft1 {} -ft2 {} -f {} -phase\".format(self.ft1, self.ft2, self.ephem))", "def total_fire_power_time_series(files, bounding_box):\n \n assert isinstance(bounding_box, BoundingBox)\n bb = bounding_box\n \n results = {}\n \n vals = map(_process_single_fire_power_time_series, zip(files, itertools.repeat(bb)))\n vals = (val for val in vals if val is not None)\n \n for time, val, fname in vals:\n results[time] = (val, fname)\n \n return results", "def envelope_delays(fin, delayfile, maxcount=1000000, new=False):\n logging.basicConfig(filename='parse_envelope_log.log', filemode='w',\n level=logging.DEBUG)\n if new:\n pat = r'(\\S+ \\S+) \\[envelope/info/VsMagnitude\\] Current time: (\\S+);'\n pat += r' Envelope: timestamp: (\\S+) waveformID: (\\S+)'\n cnt = 0\n streams = defaultdict(dict)\n delays = defaultdict(list)\n f = open(fin)\n first = None\n last = None\n while True:\n line = f.readline()\n if not line: break\n if cnt > maxcount: break\n match = re.search(pat, line)\n if match:\n ttmp = match.group(1)\n dt, t = ttmp.split()\n year, month, day = map(int, dt.split('/'))\n hour, min, sec = map(int, t.split(':'))\n logtime = UTCDateTime(year, month, day, hour, min, sec)\n currentTime = UTCDateTime(match.group(2))\n # the timestamp marks the beginning of the data window\n # so we have to add one second to get the time of the end\n # of the data window\n timestamp = UTCDateTime(match.group(3)) + 1.\n ts_string = timestamp.strftime(\"%Y-%m-%dT%H:%M:%S\")\n wID = match.group(4)\n station = wID.split('.')[0] + '.' + wID.split('.')[1]\n net = wID.split('.')[0]\n tdiff = currentTime - timestamp\n streams[wID][ts_string] = currentTime\n # We are looking for the time that is required to have 3 s of\n # envelopes following a P arrival. 
We therefore have to add the\n # time difference of the envelope 3 s after the arrival of the\n # current one which is equivalent to measuring the time\n # difference to the arrival time of the envelope 3 s before\n # the latest one.\n if len(streams[wID].keys()) >= 4:\n try:\n old_ts = (timestamp - 3.).strftime(\"%Y-%m-%dT%H:%M:%S\")\n old_ct = streams[wID][old_ts]\n tdiff += (currentTime - old_ct)\n # tdiff = currentTime - old_ct\n except Exception, e:\n logging.debug('%s %s: %s' % (wID, old_ts, e))\n continue\n else:\n continue\n if cnt == 0:\n first = timestamp\n if cnt == maxcount:\n last = timestamp\n delays[station].append(tdiff)\n cnt += 1\n else:\n print \"problem with line %d\" % cnt\n print line\n break\n print first\n print last\n f.close()\n fh = open(delayfile, 'w')\n json.dump(delays, fh)\n fh.close()\n else:\n fh = open(delayfile)\n delays = json.load(fh)\n fh.close()\n return delays", "def write_stacked_response_times(self):\r\n results_dirname = get_param(\"results_dir\")\r\n filename = os.path.join(results_dirname, \"%s_%s\" % (get_param(\"file_prefix\"),\r\n \"stacked_fairness\"))\r\n file = open(filename, \"w\")\r\n file.write(\"time\\trunning_tasks\\n\")\r\n previous_time = -1\r\n # Write in reverse order so that we automatically get the last event\r\n # for each time.\r\n for time, running_tasks in reversed(self.new_running_tasks):\r\n if time != previous_time:\r\n if previous_time != -1:\r\n file.write(\"%d\\t\" % time)\r\n for user in range(get_param(\"num_users\")):\r\n file.write(\"%d\\t\" % running_tasks[user])\r\n file.write(\"\\n\")\r\n previous_time = time", "def RWC_table(datasource, output, time):\n with open(output + '_' + time + '.csv', 'wb') as destination:\n writer = csv.writer(destination)\n if time == 'day':\n writer.writerow(['file'] + lod)\n elif time == 'week':\n writer.writerow(['file'] + weeks)\n \n for file in os.listdir(datasource):\n rowlist = []\n if file.endswith(\".csv\"):\n rowlist.append(file)\n print(file)\n\n # Check timeinterval and run pipeline with those settings\n if time == 'day':\n for day in tqdm(lod):\n rowlist.append(pipeline(datasource + file, [day], False, False))\n writer.writerow(rowlist)\n\n if time == 'week':\n for week in tqdm(weeks):\n rowlist.append(pipeline(datasource + file, week, False, False))\n writer.writerow(rowlist)", "def store_grouped_data(data,path):\n i = 0\n for name, group in data:\n l = len(group)\n print name, \", \", l\n if l > 999:\n group.to_csv(path + \"//clean.events\"+ str(i), index=False)\n i += 1", "def write_running_tasks(self, file, tasks_list):\r\n file.write(\"time\\trunning_tasks\\n\")\r\n previous_time = -1\r\n # Write in reverse order so that we automatically get the last event\r\n # for each time.\r\n for time, running_tasks in reversed(tasks_list):\r\n if time != previous_time:\r\n if previous_time != -1:\r\n file.write(\"%d\\t%d\\n\" % (previous_time, running_tasks))\r\n file.write(\"%d\\t%d\\n\" % (time, running_tasks))\r\n previous_time = time", "def file_output(matches: list, output_file_name: str = 'matches.txt'):\n with open(\"test/Matches/\" + output_file_name, 'w') as f:\n for match in matches:\n for event in match.events:\n f.write(\"%s\\n\" % event.payload)\n f.write(\"\\n\")", "def grouping(filename, outdir, minsog, maxsog):\n records = Records(Extractor.extract_records(filename))\n\n groups = records.group(minsog, maxsog)\n for key in groups:\n rw = RecordsWriter(groups[key])\n rw.write_to_dir(key + \".fasta\", outdir)", "def testInternalExportEventsDeduplicate(self):\n 
knowledge_base_object = knowledge_base.KnowledgeBase()\n\n output_mediator_object = output_mediator.OutputMediator(\n knowledge_base_object, data_location=shared_test_lib.TEST_DATA_PATH)\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator_object.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestOutputModule(output_mediator_object)\n\n test_engine = psort.PsortMultiProcessEngine()\n\n with shared_test_lib.TempDirectory() as temp_directory:\n temp_file = os.path.join(temp_directory, 'storage.plaso')\n self._CreateTestStorageFile(temp_file)\n self._ReadSessionConfiguration(temp_file, knowledge_base_object)\n\n storage_reader = (\n storage_factory.StorageFactory.CreateStorageReaderForFile(temp_file))\n storage_reader.ReadSystemConfiguration(knowledge_base_object)\n\n test_engine._ExportEvents(storage_reader, output_module)\n\n self.assertEqual(len(output_module.events), 15)\n self.assertEqual(len(output_module.macb_groups), 3)", "def total_fire_power_time_series_par(files, bounding_box):\n \n assert isinstance(bounding_box, BoundingBox)\n bb = bounding_box\n \n results = {}\n with get_context('spawn').Pool() as pool:\n \n vals = pool.map(_process_single_fire_power_time_series, zip(files, itertools.repeat(bb)))\n vals = (val for val in vals if val is not None)\n \n for time, val, fname in vals:\n results[time] = (val, fname)\n \n return results", "def write_time_series(temperature, time_series_collector, file_name):\n with open(\"./Results/\" + file_name + \"-T{:.4f}.csv\".format(temperature), 'w') as f:\n for i, line in enumerate(zip(*time_series_collector)):\n if i < len(time_series_collector[0]) - 1:\n f.write(\"%s\\n\" % \", \".join([str(element) for element in line]))\n else:\n f.write(\"%s\" % \", \".join([str(element) for element in line]))", "def events(time):\n\n event_list = eventlist()\n idx = np.all(time == event_list[:, 0:len(time)], axis=1)\n return event_list[idx,:]", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n self.logFP.flush()", "def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n \n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n #data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n #data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. 
/ 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data", "def export_events_gtf(self, edge):\n strand = self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(int(e_vals[2]), int(e_vals[3]) + edge, strand, full_event, full_event,\n 'alternative1')\n yield line1, self.etype\n\n line3 = self.gtf_string.format(e_vals[2], int(e_vals[3]) + edge, strand, full_event, full_event,\n 'alternative2')\n yield line3, self.etype", "def export_events_gtf(self, edge):\n strand = self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[2], strand, full_event, full_event,\n 'alternative1')\n yield line1, self.etype\n\n line2 = self.gtf_string.format(e_vals[3], e_vals[4], strand, full_event, full_event,\n 'alternative1')\n yield line2, self.etype\n\n line3 = self.gtf_string.format(e_vals[9], int(e_vals[9]) + edge, strand, full_event, full_event,\n 'alternative1')\n yield line3, self.etype\n\n line4 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[2], strand, full_event, full_event,\n 'alternative2')\n yield line4, self.etype\n\n line5 = self.gtf_string.format(e_vals[7], e_vals[8], strand, full_event, full_event,\n 'alternative2')\n yield line5, self.etype\n\n line6 = self.gtf_string.format(e_vals[9], int(e_vals[9]) + edge, strand, full_event, full_event,\n 'alternative2')\n yield line6, self.etype", "def search_files_keyword_specific(keywrd_list, source_dir_path,output_file, start_dt_obj, end_dt_obj):\n\tfile_obj = open(output_file,'a')\n\tfor folder_path in [x[0] for x in os.walk(source_dir_path)]:\n\t\tfor keyword in keywrd_list:\n\t\t\tfile_list = glob.glob(os.path.join(folder_path,keyword))\n\t\t\tfor file_path in file_list:\n\t\t\t\tfile_date = datetime.fromtimestamp(os.path.getctime(file_path))\n\t\t\t\t#Date compare\n\t\t\t\tif file_date >= start_dt_obj and file_date <= end_dt_obj:\n\t\t\t\t\tfile_obj.write(file_path+'\\n')\n\t\t\t\t\tprint 'Limit :',file_date, file_path\n\t\t\t\telse:\n\t\t\t\t\tprint 'Beyond:',file_date,file_path\n\tfile_obj.close()", "def database_extraction(mod_start_time,mod_end_time,instrument_chosen,subevent_bool,\r\n detect_previous_event = 
False,thresholds='100,1',\r\n one_thresh = False):\r\n obs_file_created = False\r\n\r\n #extending time window\r\n window_end_time = (mod_end_time + timedelta(days=2))\r\n window_start_time = (mod_start_time - timedelta(days=2))\r\n \r\n #making a list of all dates within window\r\n day_list=[]\r\n for d in range(10):\r\n day_list.append((window_start_time + timedelta(days=d)).date())\r\n print('day list = %s' %day_list)\r\n \r\n print('determining if an instrument has been chosen')\r\n\r\n if instrument_chosen:\r\n #if an instrument has been chosen, checking to make sure it still works for this date\r\n if inst_end < window_end_time:\r\n instrument_chosen = False\r\n else:\r\n #if insturment hasn't been chosen, figuring out what it should be for given date\r\n try:\r\n #if instrument is specified in cfg using that\r\n instrument = cfg.instrument\r\n inst_end = datetime.today()\r\n print('using %s as our instrument for observations' %instrument)\r\n instrument_chosen = True\r\n\r\n except:\r\n #choosing instrument using function if not given in cfg\r\n instrument_stuff = choose_prime_inst(window_start_time.date(),\r\n window_end_time.date())\r\n instrument = instrument_stuff[0]\r\n #figuring out how long we can use this instrument\r\n inst_end = instrument_stuff[1]\r\n instrument_chosen = True\r\n \r\n #running katie's code to extract data using chosen instrument and dates\r\n print('extracting data from GOES website')\r\n \r\n #running for only one threshold if one_thresh is true, otherwise running for default\r\n #thresholds as well as any additional threshold given\r\n if one_thresh:\r\n one_sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds) \r\n print('ran for threshold %s' %thresholds)\r\n else:\r\n if subevent_bool:\r\n thresholds = '10,1'\r\n #if event is a subevent, changing the threshold in katie's code to\r\n #10 MeV > 1pfu so that it will be recorded\r\n print('********************SUBEVENT**************************')\r\n sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n print('ran for subevent')\r\n else:\r\n #if an event, running with usual thresholds\r\n print('********************EVENT*****************************')\r\n sep.run_all(str(window_start_time), str(window_end_time),str(instrument), \r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n \r\n #reloading function so it doesn't keep old data \r\n reload(sep)\r\n \r\n #reformatting csv created from katie's code to json\r\n print('extracted - reformatting') \r\n for day in day_list: \r\n if not obs_file_created:\r\n #checking each day within the window to find the csv file if it hasn't\r\n #already been found\r\n print('thresholds: %s' %thresholds)\r\n \r\n if one_thresh:\r\n #name includes threshold if only ran for one threshold\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_gt' +\r\n str(thresholds).split(',')[0] + '_' + str(thresholds).split(',')[1] + 'pfu_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n else:\r\n #otherwise only includes date ran for\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n \r\n print('new_os_name %s' %new_obs_name) \r\n \r\n #checking if that file exists\r\n if os.path.exists(katies_path / new_obs_name):\r\n #if a file with this date exists, creating the corresponding json file\r\n 
\r\n #json name\r\n if one_thresh:\r\n obs_name = (str(instrument) + '_' + str(day) + 'only_' + str(thresholds).split(',')[0] + 'MeV_event.json')\r\n else:\r\n obs_name = (str(instrument) + '_' +\r\n str(day) + '.json')\r\n #creating json file\r\n obs_csv2json((katies_path / new_obs_name), obs_name,\r\n (ref_path/'example_sepscoreboard_json_file_v20190228.json'),\r\n instrument)\r\n \r\n print('obs file created')\r\n #file is created - will not run for anymore dates within window\r\n obs_file_created = True\r\n \r\n return(obs_name)\r\n else:\r\n print('no csv file found with this date, checking next one')", "def export_events_gtf(self, edge):\n strand = self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(e_vals[2], e_vals[3], strand, full_event, full_event,\n 'alternative1')\n yield line1, self.etype\n\n line2 = self.gtf_string.format(e_vals[4], int(e_vals[4]) + edge, strand, full_event, full_event,\n 'alternative1')\n yield line2, self.etype\n\n line3 = self.gtf_string.format(e_vals[5], e_vals[6], strand, full_event, full_event,\n 'alternative2')\n yield line3, self.etype\n\n line4 = self.gtf_string.format(int(e_vals[7]), int(e_vals[7]) + edge, strand, full_event, full_event,\n 'alternative2')\n yield line4, self.etype", "def collect_data(table_name):\n global NTU2_Med\n global NTU3_Med\n # Start date for data collection, should be fifteen minutes in the past\n start_date_form = datetime.now() - timedelta(minutes=15)\n\n # End date for data collection, should be now, to complete our 15 minute interval\n end_date_form = datetime.now()\n \n # Check which platform program is running on, if windows treat as binary\n if platform == 'Linux':\n table_file = os.open(table_name + '.csv', os.O_WRONLY | os.O_APPEND | os.O_CREAT)\n else:\n table_file = os.open(table_name + '.csv', os.O_BINARY | os.O_WRONLY | os.O_APPEND | os.O_CREAT)\n \n #Pull data from table on logger\n table_data = device.get_data(table_name, start_date_form, end_date_form)\n\n # Get 15 minute medians\n if table_name == \"Table15min\":\n # Iterate through table data, and set medians\n for i in table_data:\n NTU2_Med = int(i['TurbNTU2_Med'])\n output = \"NTU2_Med: \" + str(i['TurbNTU2_Med']) + \"\\n\" \n print(output)\n os.write(log_file, output)\n NTU3_Med = int(i['TurbNTU3_Med'])\n output = \"NTU3_Med: \" + str(i['TurbNTU3_Med']) + \"\\n\"\n print(output)\n os.write(log_file, output)\n # Set headers if applicable and convert dictionary to csv file\n if has_ran:\n output = \"Script has already ran at least once\\n\"\n os.write(log_file, output)\n table_csv = utils.dict_to_csv(table_data, \",\", header=False)\n else:\n output = \"Script has not already ran\\n\"\n os.write(log_file, output)\n table_csv = utils.dict_to_csv(table_data, \",\", header=True)\n\n output = \"Writing file to local storage\\n\"\n os.write(log_file, output)\n\n # Write table file to system\n os.write(table_file, table_csv.encode('UTF-8'))\n\n #Close file descriptor\n os.close(table_file)\n\n output = \"uploading file to server\\n\"\n os.write(log_file, output)\n\n # Upload/Append data to server\n put_data(table_name)\n\n output = \"Wrote file to server\\n\"\n os.write(log_file, output)\n\n return 0", "def summarize(self, data, order=11, verbose=False):\n self.intervals = np.diff(self.timebase[self.onsets]) # event intervals\n i_decay_pts = int(2*self.taus[1]/self.dt) # decay window time (points)\n self.peaks = []\n 
self.smpkindex = []\n self.smoothed_peaks = []\n self.amplitudes = []\n self.Qtotal = []\n self.averaged = False # set flags in case of no events found\n self.individual_events = False\n self.fitted = False\n self.fitted_tau1 = np.nan\n self.fitted_tau2 = np.nan\n self.Amplitude = np.nan\n self.avg_fiterr = np.nan\n ndata = len(data)\n avgwin = 5 # int(1.0/self.dt) # 5 point moving average window for peak detection\n# print('dt: ', self.dt)\n mwin = int((0.050)/self.dt)\n# print('mwin: ', mwin)\n #order = int(0.0004/self.dt)\n # print('onsets: ', self.onsets)\n if self.sign > 0:\n nparg = np.greater\n else:\n nparg = np.less\n if len(self.onsets) > 0: # original events\n# print('no: ', len(self.onsets))\n acceptlist = []\n for j in range(len(data[self.onsets])):\n if self.sign > 0 and self.eventstartthr is not None:\n if self.data[self.onsets[j]] < self.eventstartthr:\n continue\n if self.sign < 0 and self.eventstartthr is not None:\n if self.data[self.onsets[j]] > -self.eventstartthr:\n continue\n svwinlen = data[self.onsets[j]:(self.onsets[j]+mwin)].shape[0]\n if svwinlen > 11:\n svn = 11\n else:\n svn = svwinlen\n if svn % 2 == 0: # if even, decrease by 1 point to meet ood requirement for savgol_filter\n svn -=1\n \n if svn > 3: # go ahead and filter\n p = scipy.signal.argrelextrema(scipy.signal.savgol_filter(data[self.onsets[j]:(self.onsets[j]+mwin)], svn, 2), nparg, order=order)[0]\n else: # skip filtering\n p = scipy.signal.argrelextrema(data[self.onsets[j]:(self.onsets[j]+mwin)], nparg, order=order)[0]\n if len(p) > 0:\n self.peaks.extend([int(p[0]+self.onsets[j])])\n amp = self.sign*(self.data[self.peaks[-1]] - data[self.onsets[j]])\n\n self.amplitudes.extend([amp])\n i_end = i_decay_pts + self.onsets[j] # distance from peak to end\n i_end = min(ndata, i_end) # keep within the array limits\n if j < len(self.onsets)-1:\n if i_end > self.onsets[j+1]:\n i_end = self.onsets[j+1]-1 # only go to next event start\n move_avg, n = moving_average(data[self.onsets[j]:i_end], n=min(avgwin, len(data[self.onsets[j]:i_end])))\n if self.sign > 0:\n pk = np.argmax(move_avg) # find peak of smoothed data\n else:\n pk = np.argmin(move_avg)\n self.smoothed_peaks.extend([move_avg[pk]]) # smoothed peak\n self.smpkindex.extend([self.onsets[j]+pk])\n acceptlist.append(j)\n if len(acceptlist) < len(self.onsets):\n if verbose:\n print('Trimmed %d events' % (len(self.onsets)-len(acceptlist)))\n self.onsets = self.onsets[acceptlist] # trim to only the accepted values\n # print(self.onsets)\n self.avgevent, self.avgeventtb, self.allevents = self.average_events(self.onsets) \n if self.averaged:\n self.fit_average_event(self.avgeventtb, self.avgevent, debug=False)\n \n else:\n if verbose:\n print('No events found')\n return", "def click_time_series_aggregating(output_dir, ignore_cache):\n unit = get_unit_name()\n\n def single_sensor_label_from_topic(topic):\n split_topic = topic.split(\"/\")\n # return f\"{split_topic[1]}-{split_topic[-2]}/{split_topic[-1]}\"\n return f\"{split_topic[1]}-{split_topic[-1]}\"\n\n def unit_from_topic(topic):\n split_topic = topic.split(\"/\")\n return split_topic[1]\n\n raw135 = TimeSeriesAggregation( # noqa: F841\n \"pioreactor/+/+/od_raw/+/+\", # see note above about why we have no filter on experiment\n output_dir,\n experiment=UNIVERSAL_EXPERIMENT,\n job_name=\"od_raw_time_series_aggregating\",\n unit=unit,\n ignore_cache=ignore_cache,\n extract_label=single_sensor_label_from_topic,\n write_every_n_seconds=10,\n time_window_seconds=60\n * 
int(config[\"ui.overview.settings\"][\"raw_od_lookback_minutes\"]),\n record_every_n_seconds=5,\n )\n\n filtered135 = TimeSeriesAggregation( # noqa: F841\n \"pioreactor/+/+/od_filtered/+/+\",\n output_dir,\n experiment=UNIVERSAL_EXPERIMENT,\n job_name=\"od_filtered_time_series_aggregating\",\n unit=unit,\n ignore_cache=ignore_cache,\n extract_label=single_sensor_label_from_topic,\n write_every_n_seconds=10,\n time_window_seconds=60\n * int(config[\"ui.overview.settings\"][\"filtered_od_lookback_minutes\"]),\n record_every_n_seconds=4,\n )\n\n growth_rate = TimeSeriesAggregation( # noqa: F841\n \"pioreactor/+/+/growth_rate\",\n output_dir,\n experiment=UNIVERSAL_EXPERIMENT,\n job_name=\"growth_rate_time_series_aggregating\",\n unit=unit,\n ignore_cache=ignore_cache,\n extract_label=unit_from_topic,\n write_every_n_seconds=10,\n record_every_n_seconds=3 * 60, # TODO: move this to a config param\n )\n\n alt_media_fraction = TimeSeriesAggregation( # noqa: F841\n \"pioreactor/+/+/alt_media_calculating/alt_media_fraction\",\n output_dir,\n experiment=UNIVERSAL_EXPERIMENT,\n job_name=\"alt_media_fraction_time_series_aggregating\",\n unit=unit,\n ignore_cache=ignore_cache,\n extract_label=unit_from_topic,\n write_every_n_seconds=10,\n record_every_n_seconds=1,\n )\n\n while True:\n signal.pause()", "def export_events_gtf(self, edge):\n strand = self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[2], strand, full_event, full_event,\n 'alternative2')\n yield line1, self.etype\n\n line2 = self.gtf_string.format(e_vals[5], int(e_vals[5]) + edge, strand, full_event, full_event,\n 'alternative2')\n yield line2, self.etype\n\n line3 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[2], strand, full_event, full_event,\n 'alternative1')\n yield line3, self.etype\n\n line4 = self.gtf_string.format(e_vals[3], e_vals[4], strand, full_event, full_event, 'alternative1')\n yield line4, self.etype\n\n line5 = self.gtf_string.format(e_vals[5], int(e_vals[5]) + edge, strand, full_event, full_event,\n 'alternative1')\n yield line5, self.etype", "def draw_around_event(power,events,borders,eventName,maxY=1200):\n event_consider = events[events['eventName']==eventName].reset_index(drop=True)\n print(\"number of\", eventName ,\"in groudtruth=\",len(event_consider))\n i = 0\n while(i<len(event_consider)):\n date = time.mktime(datetime.strptime(event_consider['time'][i], \"%Y-%m-%d %H:%M:%S\").timetuple())\n start = str(datetime.fromtimestamp(date-borders[0]))\n end = str(datetime.fromtimestamp(date+borders[1]))\n print(date,start,end)\n i += 1\n serie = Series.from_array(power[(power['time']>=start)&(power['time']<=end)]['value'])\n if len(serie)>0:\n v = [serie.index[0], serie.index[len(serie)-1], 0, maxY]#xmin,xmax,ymin,ymax\n pyplot.figure(figsize=(20, 5))\n pyplot.plot(serie,'ro')\n pyplot.axis(v)\n pyplot.show()\n else:\n print(\"No data of power for this event\")", "def test_non_overlapping_events(sample_events, woodshop, caplog):\n caplog.set_level(logging.INFO)\n event1, event2 = sample_events.make_non_overlapping_events()\n woodshop.start_event(event1)\n woodshop.log_conflicts(event1.start_time)\n woodshop.end_event(event1)\n woodshop.log_conflicts(event1.end_time)\n woodshop.start_event(event2)\n woodshop.log_conflicts(event2.start_time)\n woodshop.end_event(event2)\n woodshop.log_conflicts(event2.end_time)\n assert caplog.text == \"\"", "def 
export_events_gtf(self, edge):\n strand = self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[3], strand, full_event, full_event,\n 'alternative1')\n yield line1, self.etype\n\n line3 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[2], strand, full_event, full_event,\n 'alternative2')\n yield line3, self.etype", "def plot_events1_triggered(self):\n\n result, selectedObservations = self.selectObservations(SELECT1)\n\n if not selectedObservations:\n return\n\n if not self.pj[OBSERVATIONS][selectedObservations[0]][EVENTS]:\n QMessageBox.warning(self, programName, \"There are no events in the selected observation\")\n return\n\n for obsId in selectedObservations:\n totalMediaLength = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n if totalMediaLength == -1:\n totalMediaLength = 0\n\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations, totalMediaLength)\n\n totalMediaLength = int(totalMediaLength)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n o = {}\n\n for subject in plot_parameters[\"selected subjects\"]:\n\n o[subject] = {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n cursor.execute(\n \"SELECT occurence FROM events WHERE subject = ? AND code = ? AND modifiers = ? ORDER BY observation, occurence\",\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n if modifier[0]:\n behaviorOut = [behavior, modifier[0].replace(\"|\", \",\")]\n\n else:\n behaviorOut = [behavior]\n\n behaviorOut_json = json.dumps(behaviorOut)\n\n if not behaviorOut_json in o[subject]:\n o[subject][behaviorOut_json] = []\n\n for idx, row in enumerate(rows):\n if POINT in self.eventType(behavior).upper():\n o[subject][behaviorOut_json].append([row[0], row[0]]) # for point event start = end\n\n if STATE in self.eventType(behavior).upper():\n if idx % 2 == 0:\n try:\n o[subject][behaviorOut_json].append([row[0], rows[idx + 1][0]])\n except:\n if NO_FOCAL_SUBJECT in subject:\n sbj = \"\"\n else:\n sbj = \"for subject <b>{0}</b>\".format(subject)\n QMessageBox.critical(self, programName,\n \"The STATE behavior <b>{0}</b> is not paired {1}\".format(\n behaviorOut, sbj))\n else:\n cursor.execute(\n \"SELECT occurence FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n rows = list(cursor.fetchall())\n\n if not len(rows) and plot_parameters[\"exclude behaviors\"]:\n continue\n\n if STATE in self.eventType(behavior).upper() and len(rows) % 2:\n continue\n\n behaviorOut = [behavior]\n behaviorOut_json = json.dumps(behaviorOut)\n\n if not behaviorOut_json in o[subject]:\n o[subject][behaviorOut_json] = []\n\n for idx, row in enumerate(rows):\n if POINT in self.eventType(behavior).upper():\n o[subject][behaviorOut_json].append([row[0], row[0]]) # for point event start = end\n if STATE in self.eventType(behavior).upper():\n if idx % 2 == 0:\n o[subject][behaviorOut_json].append([row[0], rows[idx + 1][0]])\n\n if not plot_events.plot_time_ranges(self.pj,\n self.timeFormat,\n self.plot_colors,\n o,\n selectedObservations[0],\n plot_parameters[\"start time\"],\n plot_parameters[\"end time\"],\n plot_parameters[\"exclude behaviors\"],\n line_width=10):\n QMessageBox.warning(self, programName, \"Check events\")", "def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n\n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n # data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n # data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n #data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ZEN_PNT'] = np.float32(90. - table['ALT'].mean())\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. 
/ 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data", "def test150EventMultipleFileSplit(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.multipleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=150,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 10)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(len(job.getFiles(type=\"lfn\")), 1)\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def preProcess(self,filename,fileoutput):\t\n\tdata=[]\n\tval =set()\n\tfo = open(fileoutput, \"wb\")\n\twith open(filename) as data_file:\n \tfor tags in data_file:\n\t\t\tif \"timestamp\" not in tags: \n \t \t continue\n\t\t\tts = re.search('timestamp: (.+?)\\)', tags).group(1)\n\t\t\tval =set()\n\t\t\tval.update({tag for tag in tags.split() if tag.startswith(\"#\")})\n\t\t\t#print val\n\t\t\tif len(val) >1:\n\t\t\t\tself.maintainWindow(data,ts)\n\t\t\t\tdata.append((ts,val))\n\t\t\t\tself.createAdjList(val,\"add\")\n\t\t\t\tprint(\"***\")\n\t\t\telse:\n\t\t\t\tself.maintainWindow(data,ts)\n\t\t\t\tprint(\"@@@@\")\n\t\t\tresult = self.calculateRollingAverages() \n\t\t\tfo.write(result+\"\\n\")\n fo.close()\n data_file.close()", "def output_beat_to_file(file_name, e):\n print(\"Writing to file:\", file_name)\n routine = gp.compile(e,pset)\n with open(file_name+\".raw\",'w') as f:\n for t in range(200000):\n f.write(chr(int(routine(t+1))%256))\n # Now convert to wav\n subprocess.call(SOX_COMMAND + \" \" + file_name + \".raw\" + \" \" + file_name + \".wav\", shell=True)\n subprocess.call(LAME_COMMAND + \" \" + file_name + \".wav\", shell=True)", "def _output(events: Optional[Iterable[EventType]] = None, count: Optional[int] = None):\n if events is not None:\n print(\"[\", end=\"\")\n for i, event in enumerate(events):\n if i > 0:\n print(\",\", end=\"\")\n print(f\"{json.dumps(event)}\", end=\"\")\n print(\"]\")\n else:\n print(f'{{\"count\": {count}}}')", "def write_crossing_times(temperature, crossing_times_collector, file_name):\n with open(\"./Results/\" + file_name + \"-T{:.4f}.csv\".format(temperature), 'w') as f:\n for crossing_times in crossing_times_collector:\n f.write(\"%s\\n\" % \", \".join([str(element) for element in crossing_times]))", "def 
export_events_gtf(self, edge):\n strand = self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[2], strand, full_event, full_event,\n 'alternative2')\n yield line1, self.etype\n\n line2 = self.gtf_string.format(e_vals[3], e_vals[4], strand, full_event, full_event,\n 'alternative2')\n yield line2, self.etype\n\n line3 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[2], strand, full_event, full_event,\n 'alternative1')\n yield line3, self.etype\n\n line4 = self.gtf_string.format(e_vals[6], e_vals[7], strand, full_event, full_event,\n 'alternative1')\n yield line4, self.etype", "def make_point_sources_file(simput_prefix, phlist_prefix, exp_time, fov, \r\n sky_center, absorb_model=\"wabs\", nH=0.05, \r\n area=40000.0, prng=None, append=False, \r\n overwrite=False, input_sources=None, \r\n output_sources=None):\r\n events = make_ptsrc_background(exp_time, fov, sky_center, \r\n absorb_model=absorb_model, nH=nH, \r\n area=area, input_sources=input_sources, \r\n output_sources=output_sources, prng=prng)\r\n write_photon_list(simput_prefix, phlist_prefix, events[\"flux\"], \r\n events[\"ra\"], events[\"dec\"], events[\"energy\"], \r\n append=append, overwrite=overwrite)", "def create_activity_all(self, f_output='activity_all.txt'):\n list_tuple = []\n epoch = datetime.datetime.utcfromtimestamp(0) \n\n # For each records_*.csv, excluding records_{0,1,2,3,4}.csv\n regex = re.compile('records_.\\.csv')\n for filename in os.listdir(self.dir_name):\n if not re.match(regex, filename):\n if fnmatch.fnmatch(filename, 'records_*.csv'):\n path_to_file = self.dir_name + \"/\" + filename\n ret = subprocess.check_output(['wc', '-l', path_to_file])\n num = int(ret.split(' ')[0])\n # If follower has activity\n if num > 1:\n follower_id = filename.split('_')[1].split('.')[0]\n # Extract id of follower, get the anonymous number\n if follower_id in self.map_userid_number:\n follower_num = self.map_userid_number[follower_id]\n # Parse through file\n f = open(path_to_file,'r')\n # Skip first line\n f.readline()\n for line in f:\n line_split = line.split(',')\n # Extract the time of post, create the pair\n # year-month-day-hour-min-second (UTC - 4)\n date_and_time = line_split[1]\n dt_local = datetime.datetime.strptime(date_and_time, '%Y-%m-%d-%H:%M:%S')\n dt_utc = dt_local + datetime.timedelta(hours=4)\n seconds = (dt_utc - epoch).total_seconds()\n list_tuple.append((seconds,follower_num)) \n # Now append the bot activity\n for bot_id in range(0,5):\n print bot_id\n filename = \"records_%d.csv\" % bot_id\n path_to_file = self.dir_name + \"/\" + filename\n f = open(path_to_file, 'r')\n # Skip first line\n f.readline()\n for line in f:\n line_split = line.split(',')\n # Extract time of post, create the pair\n date_and_time = line_split[1]\n dt_local = datetime.datetime.strptime(date_and_time, '%Y-%m-%d-%H-%M-%S')\n dt_utc = dt_local + datetime.timedelta(hours=4)\n seconds = (dt_utc - epoch).total_seconds()\n list_tuple.append((seconds, bot_id+1))\n\n # Sort all pairs based on time of post\n list_tuple.sort()\n # Write f_output\n f_write = open(f_output, 'w')\n for t in list_tuple:\n f_write.write(\"%d %d\\n\" % (t[0], t[1]))\n f_write.close()", "def events(self) -> [redirect, HTMLBody]:\n\t\t# Get all events and split into 2 groups\n\t\teventsl, eventsr = prepare_events(get_events())\n\t\treturn render_template(\"events.jinja2\", eventsl=eventsl, 
eventsr=eventsr)", "def test_1000_populations_with_activity_12perday_should_yield_60k_logs_in_5days():\n\n with path.tempdir() as log_parent_folder:\n log_folder = os.path.join(log_parent_folder, \"logs\")\n\n # note that we cannot have clock_step > 2h since that\n run_test_scenario_1(clock_step=\"1h\",\n simulation_duration=\"5 days\",\n n_stories=12,\n per=pd.Timedelta(\"1day\"),\n log_folder=log_folder)\n\n logging.info(\"loading produced logs\")\n logs = load_all_logs(log_folder)[\"the_logs\"]\n\n logging.info(\"number of produced logs: {} logs\".format(logs.shape[0]))\n\n # 5 days of simulation should produce 1000 * 12 * 5 == 60k logs\n assert 55e3 <= logs.shape[0] <= 65e3", "def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')" ]
[ "0.6087541", "0.59404063", "0.58714825", "0.5870363", "0.58608985", "0.57862127", "0.57703376", "0.5700037", "0.5650298", "0.56280893", "0.5617199", "0.56035525", "0.5598488", "0.5553481", "0.55442035", "0.5541307", "0.5524684", "0.55119693", "0.5496372", "0.5496372", "0.54935104", "0.54563636", "0.5414372", "0.54072356", "0.53711987", "0.5353537", "0.5346704", "0.53401476", "0.53386515", "0.53324944", "0.5325599", "0.53225356", "0.5321502", "0.5317875", "0.52816266", "0.5279892", "0.5266815", "0.5259597", "0.52519244", "0.5248542", "0.5235403", "0.5231735", "0.5219549", "0.5219303", "0.52073854", "0.51970255", "0.51950127", "0.51920044", "0.51689994", "0.51606613", "0.51481146", "0.51337045", "0.513016", "0.51170504", "0.5109368", "0.5106234", "0.50858074", "0.5084599", "0.50819147", "0.50742835", "0.50732356", "0.5072462", "0.5064467", "0.5057986", "0.50564414", "0.50519246", "0.5048024", "0.50366586", "0.503627", "0.5027848", "0.5022266", "0.5016967", "0.5005473", "0.5000496", "0.4997456", "0.4996114", "0.49957532", "0.49952665", "0.4992327", "0.49874935", "0.49798965", "0.49797162", "0.4978258", "0.49722314", "0.49670854", "0.49609253", "0.49478143", "0.49472043", "0.49457318", "0.49348205", "0.49315718", "0.49298373", "0.49264848", "0.49230295", "0.49211657", "0.4916593", "0.49159035", "0.4915324", "0.49109265", "0.49047217" ]
0.6893672
0
given lists of peak fluxes for protons >10 MeV and >100 MeV, creates a boolean for whether or not each event is a subevent (doesn't cross a threshold)
def gen_subevent_bools(p_10,p_100):\n    #list of subevent booleans\n    subevent_bools = []\n    #extracting 10 MeV peak flux if it exists\n    for j in range(len(p_10)):\n        try:\n            p10 = float(p_10[j])\n        except ValueError:\n            p10 = 'nan'\n        #extracting 100 MeV peak flux if it exists\n        try:\n            p100 = float(p_100[j])\n        except ValueError:\n            p100 = 'nan'\n        #checking if peak fluxes exist\n        if str(p10) != 'nan' and str(p100) != 'nan':\n            #if the peak fluxes both exist and >10 MeV is both below threshold,\n            #subevent is true (only care about >10 bc of definition of subevent)\n            if p10 < 10:\n                subevent_bools.append(True)\n            elif p10 > 10:\n                subevent_bools.append(False)\n        #if >10 MeV doesn't exist, subevent is true\n        else:\n            subevent_bools.append(True)\n    return(subevent_bools)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spec_to_peaks(data, value, fp = iterate_structure(generate_binary_structure(rank = 2, connectivity=2), 10)):\n\n max_arr = maximum_filter(data, footprint = fp)\n return (data == max_arr) & (data > value)", "def check_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... no data?\")\n pass", "def contains_suitable_peak(self):\n if not self.contains_peak:\n return False\n\n idx = self.product_idx(product=self.final_species)\n if idx is not None and self[idx].energy < self[self.peak_idx].energy:\n logger.info('Products made and have a peak. Assuming suitable!')\n return True\n\n # Products aren't made by isomorphism but we may still have a suitable\n # peak..\n if any(self[-1].constraints[b.atom_indexes] == b.final_dist\n for b in self.bonds):\n logger.warning('Have a peak, products not made on isomorphism, but'\n ' at least one of the distances is final. Assuming '\n 'the peak is suitable ')\n return True\n\n return False", "def check_recon_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'recon_spec'):\n for i, spectrum in enumerate(self.recon_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.recon_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.recon_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.recon_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.recon_spec[spectrum]._add_to_overlapping_filters(filtername)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... 
no data?\")\n pass", "def test_peak_detection(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, _) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_sms = utilFunctions.peakDetection(mx, self.sm.t)\n for j, (p, p_s) in enumerate(itertools.zip_longest(ploc, ploc_sms)):\n with self.subTest(frame=i, peak_n=j):\n self.assertEqual(p, p_s)", "def check_star(peaks,data):\n star = 0\n for i in peaks:\n max = data[i]\n if i<3 or i+4>data.size:\n continue\n mean = data[i-3:i+4].mean()\n if (max-mean)<0.1*max:\n star += 1\n if star*2>peaks.size:\n return True\n else:\n return False", "def isPeakAssigned(peak, fully=True):\n\n n = 0\n for peakDim in peak.peakDims:\n if len(peakDim.peakDimContribs) > 0:\n n +=1\n \n if n == len(peak.peakDims):\n return True\n \n elif n > 0:\n if fully:\n return False\n else:\n return True\n \n else:\n return False", "def check_sim_overlaps(self, verbose = False):\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.sim_spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.sim_spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.sim_spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.sim_spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.sim_spec[spectrum]._add_to_overlapping_filters(filtername, verbose=verbose)\n else:\n warnings.warn(\"SNClass.check_sim_overlaps - something went wrong... 
no data?\")\n pass", "def measure_peak(sig, use_inflection=True, return_allinfo=False):\n sig = np.array(sig)\n cr = locate_peak(sig)\n cr_crosszero = np.zeros_like(cr)\n cr_inflection = np.zeros_like(cr)\n\n # cross zero points\n cr_cr1 = -int_sign(sig[1:] * sig[:-1])\n cr_cr2 = -int_sign(sig[:-1] * sig[1:])\n cr_cr1[cr_cr1<0] = 0\n cr_cr2[cr_cr2<0] = 0\n cr_crosszero[1:] = cr_cr1\n cr_crosszero[:-1] += cr_cr2\n cr_crosszero = int_sign(cr_crosszero * sig) * 4\n\n # inflection points\n d2 = second_derivate(sig)\n d2p = locate_peak(d2)\n d2p[np.where( np.abs(d2p) != 1 )] = 0\n d2p[np.where( ((d2p==1) & (sig<0)) | ((d2p==-1) & (sig>0)) )] = 0\n cr_inflection[np.where(d2p==-1)] = 8\n cr_inflection[np.where(d2p==1)] = -8\n \n if use_inflection:\n cr_combine = cr + cr_inflection + cr_crosszero \n else:\n cr_combine = cr + cr_crosszero\n\n oned = False\n if len(np.shape(sig)) == 1:\n oned = True\n sig = sig[:, np.newaxis]\n \n peaks_list = []\n for i in range(np.shape(sig)[1]):\n pvs = np.where(np.abs(cr[:,i]) == 1)[0]\n lims = np.where(np.abs(cr_combine[:,i]) >= 2)[0]\n if len(pvs) == 0 :\n peaks_list.append([])\n continue\n if np.shape(lims)[0] == 0:\n lower_pos = pvs\n upper_pos = pvs\n else:\n lower_arr = (pvs > lims[:, np.newaxis])\n upper_arr = (pvs < lims[:, np.newaxis])\n lower_arr_r = np.flipud(lower_arr)\n upper_pos_i = np.argmax(upper_arr, axis=0)\n upper_pos = lims[(upper_pos_i, )]\n w_upper_none = np.where(upper_arr[-1,:] == False)\n upper_pos[w_upper_none] = pvs[w_upper_none]\n lower_pos_r_i = np.argmax(lower_arr_r, axis=0)\n lower_pos_i = len(lims) - 1 - lower_pos_r_i\n lower_pos = lims[(lower_pos_i, )]\n w_lower_none = np.where(lower_arr[0, :] == False)\n lower_pos[w_lower_none] = 0\n\n peaks = []\n for center, lower, upper in zip(pvs, lower_pos, upper_pos):\n depth = sig[center, i]\n sig_range = sig[lower:upper+1, i]\n sig_range[np.where(int_sign(sig_range) != int_sign(depth))] = 0.0\n volume = np.sum(sig_range)\n peaks.append(Peak(center=center, lower=lower, upper=upper, depth=depth, volume=volume))\n peaks_list.append(peaks)\n if oned:\n peaks_list = peaks_list[0]\n \n if return_allinfo:\n return peaks_list, cr, cr_crosszero, cr_inflection \n else:\n return peaks_list", "def event_overlap(labels, half, timestamp, window):\n\n for l, _ in labels:\n if l[0] == half:\n ceil = l[1] + window//2\n floor = l[1] - window//2\n if timestamp <= ceil and timestamp >= floor:\n return True\n return False", "def test_peak_refinement(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, px) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_i, pmag_i, pph_i = sample_dsp.peak_refine(ploc, mx, px) # pylint: disable=W0632\n ploc_i_sms, pmag_i_sms, pph_i_sms = utilFunctions.peakInterp(mx, px, ploc)\n with self.subTest(frame=i, value=\"location\"):\n self.assert_almost_equal_rmse(ploc_i, ploc_i_sms)\n with self.subTest(frame=i, value=\"magnitude\"):\n self.assert_almost_equal_rmse(pmag_i, pmag_i_sms)\n with self.subTest(frame=i, value=\"phase\"):\n self.assert_almost_equal_rmse(pph_i, pph_i_sms)", "def isPossibleSubsumer(self):\n if self.action_cnt > cons.theta_sub and self.error < cons.err_sub: #self.prediction < cons.err_sub: (why does it work?)\n return True\n return False", "def is_subset_of(self, uspec):\n \n if self.is_power_onoff() or uspec.is_power_onoff():\n return False\n \n if (uspec.is_bias() or not uspec.is_calib()) and self['speed'] != uspec['speed']:\n return False\n\n if int(self['x_bin']) % 
int(uspec['x_bin']) != 0 or int(self['y_bin']) % int(uspec['y_bin']) != 0:\n return False\n\n if self.number_windows() > 0:\n\n if not uspec.contains_window(self['x1_start'], self['y1_start'], self['x1_size'], self['y1_size'], self['x_bin'], self['y_bin']):\n return False\n\n if self.number_windows() > 1:\n\n if not uspec.contains_window(self['x2_start'], self['y2_start'], self['x2_size'], self['y2_size'], self['x_bin'], self['y_bin']):\n return False\n\n return True", "def whichPeaks(trace):\n peaks = []\n df = np.diff(trace)\n for t in range(len(df)-4):\n if df[t] > 0 and df[t+1] > 0:\n if df[t+2] < 0 and df[t+3] < 0: # Potential peak\n if trace[t+2] > np.mean(trace):\n peaks.append([t+2, trace[t+2]])\n return peaks", "def testPeakLikelihoodFlux(self):\n # make mp: a flux measurer\n measControl = measAlg.PeakLikelihoodFluxControl()\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(measControl).build(schema)\n \n # make and measure a series of exposures containing just one star, approximately centered\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(100, 101))\n kernelWidth = 35\n var = 100\n fwhm = 3.0\n sigma = fwhm/FwhmPerSigma\n convolutionControl = afwMath.ConvolutionControl()\n psf = measAlg.SingleGaussianPsf(kernelWidth, kernelWidth, sigma)\n psfKernel = psf.getLocalKernel()\n psfImage = psf.computeKernelImage()\n sumPsfSq = numpy.sum(psfImage.getArray()**2)\n psfSqArr = psfImage.getArray()**2\n for flux in (1000, 10000):\n ctrInd = afwGeom.Point2I(50, 51)\n ctrPos = afwGeom.Point2D(ctrInd)\n\n kernelBBox = psfImage.getBBox(afwImage.PARENT)\n kernelBBox.shift(afwGeom.Extent2I(ctrInd))\n\n # compute predicted flux error\n unshMImage = makeFakeImage(bbox, [ctrPos], [flux], fwhm, var)\n\n # filter image by PSF\n unshFiltMImage = afwImage.MaskedImageF(unshMImage.getBBox(afwImage.PARENT))\n afwMath.convolve(unshFiltMImage, unshMImage, psfKernel, convolutionControl)\n \n # compute predicted flux = value of image at peak / sum(PSF^2)\n # this is a sanity check of the algorithm, as much as anything\n predFlux = unshFiltMImage.getImage().get(ctrInd[0], ctrInd[1]) / sumPsfSq\n self.assertLess(abs(flux - predFlux), flux * 0.01)\n \n # compute predicted flux error based on filtered pixels\n # = sqrt(value of filtered variance at peak / sum(PSF^2)^2)\n predFluxErr = math.sqrt(unshFiltMImage.getVariance().get(ctrInd[0], ctrInd[1])) / sumPsfSq\n\n # compute predicted flux error based on unfiltered pixels\n # = sqrt(sum(unfiltered variance * PSF^2)) / sum(PSF^2)\n # and compare to that derived from filtered pixels;\n # again, this is a test of the algorithm\n varView = afwImage.ImageF(unshMImage.getVariance(), kernelBBox)\n varArr = varView.getArray()\n unfiltPredFluxErr = math.sqrt(numpy.sum(varArr*psfSqArr)) / sumPsfSq\n self.assertLess(abs(unfiltPredFluxErr - predFluxErr), predFluxErr * 0.01)\n \n for fracOffset in (afwGeom.Extent2D(0, 0), afwGeom.Extent2D(0.2, -0.3)):\n adjCenter = ctrPos + fracOffset\n if fracOffset == (0, 0):\n maskedImage = unshMImage\n filteredImage = unshFiltMImage\n else:\n maskedImage = makeFakeImage(bbox, [adjCenter], [flux], fwhm, var)\n # filter image by PSF\n filteredImage = afwImage.MaskedImageF(maskedImage.getBBox(afwImage.PARENT))\n afwMath.convolve(filteredImage, maskedImage, psfKernel, convolutionControl)\n\n exposure = afwImage.makeExposure(filteredImage)\n exposure.setPsf(psf)\n \n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, 
afwGeom.Point2D(*adjCenter))\n measFlux = source.get(measControl.name)\n measFluxErr = source.get(measControl.name + \".err\")\n self.assertFalse(source.get(measControl.name + \".flags\"))\n self.assertLess(abs(measFlux - flux), flux * 0.003)\n \n self.assertLess(abs(measFluxErr - predFluxErr), predFluxErr * 0.2)\n\n # try nearby points and verify that the flux is smaller;\n # this checks that the sub-pixel shift is performed in the correct direction\n for dx in (-0.2, 0, 0.2):\n for dy in (-0.2, 0, 0.2):\n if dx == dy == 0:\n continue\n offsetCtr = afwGeom.Point2D(adjCenter[0] + dx, adjCenter[1] + dy)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, offsetCtr)\n offsetFlux = source.get(measControl.name)\n self.assertLess(offsetFlux, measFlux)\n \n # source so near edge of image that PSF does not overlap exposure should result in failure\n \n for edgePos in (\n (1, 50),\n (50, 1),\n (50, bbox.getHeight() - 1),\n (bbox.getWidth() - 1, 50),\n ):\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*edgePos))\n self.assertTrue(source.get(measControl.name + \".flags\"))\n \n # no PSF should result in failure: flags set\n noPsfExposure = afwImage.ExposureF(filteredImage)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, noPsfExposure, afwGeom.Point2D(*adjCenter))\n self.assertTrue(source.get(measControl.name + \".flags\"))", "def _is_mlc_peak_in_window(\n self, window, height_threshold, edge_threshold, picket_peak_val\n ) -> bool:\n if self.orientation == Orientation.UP_DOWN:\n std = np.std(window, axis=1)\n else:\n std = np.std(window, axis=0)\n is_above_height_threshold = np.max(window) > height_threshold * picket_peak_val\n is_not_at_edge = max(std) < edge_threshold * np.median(std)\n return is_above_height_threshold and is_not_at_edge", "def check_peak_win(self):\n if self.peak_win[0] < 0.0:\n self.peak_win[0] = 0.0\n if self.logger is not None:\n self.logger.warning(('Start of peak window < 0 sec for cond: {}. ' +\n 'Setting to 0.').format(self.cond))\n if self.peak_win[1] > self.psc_dur:\n self.peak_win[1] = self.psc_dur\n if self.logger is not None:\n logger.warning(('End of peak window is longer than trial HRF ' +\n 'for cond: {}. Truncating.').format(self.cond))\n return", "def check_overlaps(self, filter_objects, verbose = False):\n if isinstance(FilterClass, type(filter_objects)):\n ## if only one filter is given\n filter_objects = [filter_objects, ]\n\n\n for i, filter_name in enumerate(filter_objects):\n if isinstance(FilterClass, type(filter_name)):\n filter_obj = filter_name\n elif isinstance(filter_objects, dict):\n filter_obj = filter_objects[filter_name]\n else:\n filter_obj = filter_objects[i]\n\n if verbose:print(i, filter_obj)\n\n if hasattr(filter_obj, \"_lower_edge\") and \\\n hasattr(filter_obj, \"_upper_edge\") and \\\n hasattr(self, \"data\"):\n blue_bool = filter_obj._lower_edge > self.min_wavelength\n red_bool = filter_obj._upper_edge < self.max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n if verbose: print(within)\n if within:\n self._add_to_overlapping_filters(filter_name, verbose=verbose)\n else:\n warnings.warn(\"SpectrumClass.check_overlaps - something went wrong... 
no overlaps or data?\")\n if self._n_overlapping_filters == 1:\n self._overlapping_filter_list = [self._overlapping_filter_list,] ## added to fix issue #27\n pass", "def spectral_maxpeaks(sign, FS):\n f, ff = plotfft(sign, FS)\n diff_sig = np.diff(ff)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd+1]<0 and diff_sig[nd]>0)])", "def __isScanContained(self, subms, scanlist, tbin):\n isContained = False \n \n mymsmd = msmdtool()\n mymsmd.open(subms)\n \n # Check if subms scans contain all selected scans\n hasScans = False\n s = mymsmd.scannumbers()\n subms_scans = map(str, s)\n if set(scanlist) <= set(subms_scans):\n hasScans = True\n \n if hasScans:\n t = mymsmd.timesforscans(s)\n mymsmd.close()\n t_range = t.max() - t.min()\n \n if t_range >= tbin: \n isContained = True\n \n return isContained", "def identify_flux(xyz: list) -> list:\n flagged_lines = [tup for tup in xyz if abs(tup[3]) > THRESHOLDS[0] and abs(tup[4]) > THRESHOLDS[1]]\n\n return flagged_lines", "def isSingleParticle(self):\r\n\r\n\t\tindex_of_maximum = np.argmax(self.scatData) #get the peak position\r\n\t\trun = 55. #define the run to use\r\n\t\t\r\n\t\tleft_rise = self.scatData[index_of_maximum]-self.scatData[index_of_maximum-int(run)] #get the rise from posn 10 to the peak\r\n\t\tleft_slope = left_rise/run\r\n\t\t\r\n\t\ttry:\r\n\t\t\tright_rise = self.scatData[index_of_maximum]-self.scatData[index_of_maximum+int(run)] #get the rise from a point the same distance away from teh peak as position 10, but on the other side\r\n\t\t\tright_slope = right_rise/run\r\n\t\texcept:\r\n\t\t\treturn\r\n\t\t\t\r\n\t\tpercent_diff = np.absolute((right_slope-left_slope)/(0.5*right_slope+0.5*left_slope))\r\n\t\tif percent_diff > 0.1:\r\n\t\t\tself.doublePeak = True", "def check_overlap(current, hit, overlap = 200):\n for prev in current:\n p_coords = prev[2:4]\n coords = hit[2:4]\n if get_overlap(coords, p_coords) >= overlap:\n return True\n return False", "def analyze_ev_wf_compact(self, event, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001):\n\n fig, ax = plt.subplots(nrows=3, ncols=3)\n peaks_temp = pd.DataFrame()\n\n for i in range(0, 9):\n if event < len(self.table_sipm_time):\n # Creo un np.array con gli indici della singola waveform..\n wf_idx = [event*self.points_per_wf, event *\n self.points_per_wf+self.points_per_wf]\n # ..i tempi di ciascun punto..\n wf_time = self.table_sipm_time['t'].iloc[event] + \\\n self.table_sipm_wf['TIME'][int(wf_idx[0]):int(wf_idx[1])]\n # ..e i valori del segnale di ciascun ppunto\n wf_ch = - \\\n self.table_sipm_wf['CH1'][int(wf_idx[0]):int(wf_idx[1])]\n\n # Per trovare la baseline, faccio un fit polinomiale di grado 0..\n # ..su un numero finito di punti iniziali, specificato dall'utente..\n # ..poi la salvo internamente alla classe\n self.baseline = np.polyfit(\n wf_time[0:n_bsl], wf_ch[0:n_bsl], 0)[0]\n # Voglio anche disegnarla sui plot, quindi mi creo una lista di x e di y..\n # ..nello spazio della waveform\n bsl_time = wf_time[0:n_bsl]\n bsl_ch = [self.baseline] * n_bsl\n\n # Per trovre i picchi, uso la funzione find_peaks di scipy.signal\n # I valori di height e prominence sono specificati dall'utente..\n # ..e scalti per selezionare tutti i picchi senza prendere rumore\n peaks, _ = sp.find_peaks(\n wf_ch, height=peak_height, prominence=peak_prominences)\n\n # Ora posso plottare tutto:\n plt.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))\n # la waveform..\n ax[int(i / 3)][i % 3].plot(wf_time,\n wf_ch, linestyle='-', linewidth=1)\n # ..la 
baseline..\n ax[int(i / 3)][i % 3].plot(bsl_time, bsl_ch, linestyle='-',\n linewidth=1, c='darkgreen')\n # ..e i picchi (se ci sono)\n if len(peaks) > 0:\n ax[int(i / 3)][i % 3].scatter(wf_time.iloc[peaks],\n wf_ch.iloc[peaks], c='darkred')\n\n # Set common labels\n fig.text(0.5, 0.01, 'Time (s)', ha='center', va='center')\n fig.text(0.02, 0.5, 'Amplitude (V)', ha='center', va='center', rotation='vertical')\n \n \n # plt.show()\n peaks_temp = pd.concat([peaks_temp, pd.DataFrame(\n {'t': wf_time.iloc[peaks], 'A': wf_ch.iloc[peaks]-self.baseline})], ignore_index=True)\n event += 1\n\n # ..e salvo il plot in una cartella a parte\n folder_name = 'plot'\n plot_name = '{0}/{1}_ev{2}.png'.format(\n folder_name, pic_name, event)\n fig.savefig(plot_name)\n plt.close(fig)\n\n # La funzione restituisce i valori di tempo e ampiezza (ottenuta come Ch1-baseline)..\n # ..agli indici dei massimi trovati da find_peaks\n return peaks_temp", "def is_within_phase_space(self, events: np.ndarray) -> Tuple[bool]:\n raise NotImplementedError", "def global_peak(apsp, sfield, peaks, n_size=5):\n\n peak_map = {p: None for p in peaks}\n corr_map = {p: None for p in peaks}\n\n for p in peaks:\n\n idx = (apsp[p, :]<=n_size)\n peak_map[p] = sfield[idx].mean()\n corr_map[p] = sfield[p]\n\n maxima = max(peak_map, key=peak_map.get)\n\n return [maxima, peak_map]", "def detect_min_max(arr):\n\n max_value = max(np.absolute(np.reshape(arr, -1)))\n peaks_max = []\n peaks_min = []\n x_max = []\n y_max = []\n z_max = []\n x_min = []\n y_min = []\n z_min = []\n\n for j1 in range(10, arr.shape[0]-10):\n for j2 in range(10, arr.shape[1]-10):\n for j3 in range(10, arr.shape[2]-10):\n if (np.absolute(arr[j1, j2, j3]) > 0.3*max_value):\n\n aaaa = [\n arr[j1, j2, j3 + 1], arr[j1, j2 + 1, j3],\n arr[j1 + 1, j2, j3], arr[j1, j2, j3 - 1],\n arr[j1, j2 - 1, j3], arr[j1 - 1, j2, j3],\n arr[j1 + 1, j2 + 1, j3 + 1],\n arr[j1 - 1, j2 - 1, j3 - 1],\n arr[j1 - 1, j2 + 1, j3 + 1], arr[j1, j2 + 1, j3 + 1],\n arr[j1, j2 - 1, j3 - 1], arr[j1, j2 - 1, j3 + 1],\n arr[j1, j2 + 1, j3 - 1], arr[j1 + 1, j2, j3 + 1],\n arr[j1 - 1, j2, j3 - 1], arr[j1 - 1, j2, j3 + 1],\n arr[j1 + 1, j2, j3 - 1], arr[j1 + 1, j2 + 1, j3],\n arr[j1 - 1, j2 - 1, j3], arr[j1 + 1, j2 - 1, j3],\n arr[j1 - 1, j2 + 1, j3], arr\n [j1 + 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 + 1, j3 - 1], arr\n [j1 - 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 - 1, j3 - 1], arr\n [j1 - 1, j2 + 1, j3 - 1]]\n bbbb = [\n arr[j1, j2, j3 + 9], arr[j1, j2 + 9, j3],\n arr[j1 + 9, j2, j3], arr[j1, j2, j3 - 9],\n arr[j1, j2 - 9, j3], arr[j1 - 9, j2, j3]]\n\n if ((arr[j1, j2, j3] > max(aaaa)) and (max(aaaa) > max(bbbb))):\n peaks_max = np.append(peaks_max, arr[j1, j2, j3])\n x_max = np.append(x_max, j1)\n y_max = np.append(y_max, j2)\n z_max = np.append(z_max, j3)\n\n if ((arr[j1, j2, j3] < min(aaaa)) and (min(aaaa) < min(bbbb))):\n peaks_min = np.append(peaks_min, arr[j1, j2, j3])\n x_min = np.append(x_min, j1)\n y_min = np.append(y_min, j2)\n z_min = np.append(z_min, j3)\n\n return peaks_min, np.vstack(\n (x_min, y_min, z_min)), peaks_max, np.vstack(\n (x_max, y_max, z_max))", "def is_coelution(spectrum_in, ms2_precursor, da_after_precursor = 1.3, delta_mz = 0.03, percentage_intensity_not_coelution = 10, percentage_accetable_coelution = False):\n\n upper_mz = ms2_precursor + da_after_precursor\n\n precursor_mz_upper = ms2_precursor + delta_mz\n precursor_mz_lower = ms2_precursor - delta_mz\n\n # Ion +1 to ignore in the spectrum\n ignore_peak_mz = ms2_precursor + 1\n ignore_upper_mz = ignore_peak_mz + delta_mz\n 
ignore_lower_mz = ignore_peak_mz - delta_mz\n\n peaks = spectrum_in.get_peaks()\n reverse_peaks = reversed(peaks)\n\n position = 0\n for peak in reverse_peaks:\n mz = peak.get_mz()\n\n if mz <= precursor_mz_upper and mz >= precursor_mz_lower:\n precursor_mz = mz\n precursor_intensity = peak.get_intensity()\n precursor_peak = peak\n # print(\"Found precursor in MS1: Mz:\", precursor_mz, \"Intensity:\", precursor_intensity)\n break\n position += 1\n\n # print(spectrum_in.get_size())\n position = spectrum_in.get_size() - position\n\n # Intensity of peak to consider as coelution calculation\n # Below this threshold, nothing is considered coelution\n not_coelution_threshold = precursor_intensity * percentage_intensity_not_coelution / 100\n # Below this threshold, coelution is considered acceptable\n if percentage_accetable_coelution != False:\n acceptable_coelution_threshold = precursor_intensity * percentage_accetable_coelution / 100\n\n acceptable_coelution = list()\n proper_coelution = list()\n coelution = [proper_coelution, acceptable_coelution, precursor_peak]\n\n for peak in peaks[position:]:\n mz = peak.get_mz()\n\n if mz < upper_mz:\n \n # We search for peaks different to the ion +1\n if mz > ignore_upper_mz or mz < ignore_lower_mz:\n intensity = peak.get_intensity()\n \n if intensity > not_coelution_threshold:\n \n if percentage_accetable_coelution == False:\n coelution[0].append(peak)\n\n else:\n \n if intensity > acceptable_coelution_threshold:\n coelution[0].append(peak)\n else:\n coelution[1].append(peak) \n\n else:\n break\n\n \"\"\"\n print(\"Coelution_list\")\n print(\"Proper_coelution:\", end=\"\")\n for peak in coelution[0]:\n print(\"MZ:\", peak.get_mz(), \"Intensity\", peak.get_intensity(), end=\",\")\n print(\"\\nAcceptable_coelution:\", end=\"\")\n for peak in coelution[1]:\n print(\"MZ:\", peak.get_mz(), \"Intensity\", peak.get_intensity(), end=\",\")\n print(\"\")\n \"\"\"\n\n return(coelution)", "def peak_to_peak_variability(magnitudes, errors):\n sums = magnitudes + errors\n differences = magnitudes - errors\n\n min_sum = np.min(sums)\n max_diff = np.max(differences)\n\n ptpv = (max_diff - min_sum) / (max_diff + min_sum)\n return ptpv", "def haveEncountered(self,mono1,mono2,eps): \n return self.distance(mono1,mono2) < eps", "def peak_in(self, mz, rt):\n if self.rt_match(rt) and self.mz_match(mz):\n return True\n else:\n return False", "def checkendsilence(inputgiven):\n output = getlastslice(inputgiven)\n wave_file = wave.open(output, \"r\")\n for i in range(wave_file.getnframes()):\n current_frame = wave_file.readframes(1)\n unpacked_signed_value = struct.unpack(\"<h\", current_frame)\n if abs(unpacked_signed_value[0]) > 500:\n return False\n return True", "def is_subset(small, big, abs_tol=ABS_TOL):\n for x in [small, big]:\n if not isinstance(x, (Polytope, Region)):\n msg = 'Not a Polytope or Region, got instead:\\n\\t'\n msg += str(type(x))\n raise TypeError(msg)\n diff = small.diff(big)\n volume = diff.volume\n if volume < abs_tol:\n return True\n else:\n return False", "def overlaps_with_subspace(wavefunc: dict, subspace: list) -> bool:\n assert isinstance(wavefunc, dict), 'Please provide your state as a dict.'\n assert isinstance(subspace, list), 'Please provide subspace as a list of str.'\n\n # Deal with empty subspace:\n if not subspace:\n return False\n assert isinstance(subspace[0], str), 'Please provide subspace as a list of str.'\n assert len(wavefunc) >= len(subspace)\n tol = 1e-7\n\n for basisvector in subspace:\n if abs(wavefunc[basisvector]) > tol:\n 
return True\n\n return False", "def findPeakAndValley(np):\n peakValleyArray = []\n for i in range (1, len(np) - 1):\n if (np[i][STOCK_VALUE_INDEX] / np[i - 1][STOCK_VALUE_INDEX] > 1 and np[i + 1][STOCK_VALUE_INDEX] / np[i][STOCK_VALUE_INDEX] < 1):\n peakValleyArray.append(i)\n if (np[i][STOCK_VALUE_INDEX] / np[i - 1][STOCK_VALUE_INDEX] < 1 and np[i + 1][STOCK_VALUE_INDEX] / np[i][STOCK_VALUE_INDEX] > 1):\n peakValleyArray.append(i)\n return peakValleyArray", "def binspecdat( wavelength, flux, fluxerr=[], binwidth=10, sigclip=0, sumerrs=False,\n wstart=0, wend=0 ):\n\n w,f = wavelength, flux\n wbinned, fbinned = [], []\n wbin,fbin,dfbin = np.array([]), np.array([]), np.array([])\n dw, df = [], []\n if wstart : istart = np.where( w>wstart )[0][0]\n else : istart = 0\n if wend : iend = np.where( w<wend )[0][-1]\n else : iend = len(w)\n w0 = w[istart]\n for i in range(istart,iend):\n fullbin = False\n if wend and w[i]>wend : break\n if w[i]>w0+binwidth :\n # determine the mean value in this bin\n w0 = w[i]\n igoodval = []\n if sigclip :\n # use sigma clipping to reject outliers\n igoodval = isigclip( fbin, sigclip )\n if len(igoodval) :\n wbinval = np.mean( wbin[igoodval] )\n fbinval = np.mean( fbin[igoodval] )\n dwbinval = (wbin[igoodval].max() - wbin[igoodval].min())/2.\n #dwbinval = (wbin.max() - wbin.min())/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( fbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval2 = np.mean( dfbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( fbin[igoodval] ) / np.sqrt(len(igoodval)-2)\n\n fullbin = True\n # note: if the binning is not successful, we continue building the bin\n else :\n # use a straight median\n wbinval = np.median( wbin )\n fbinval = np.median( fbin )\n dwbinval = (wbin[-1]-wbin[0])/2.\n if sumerrs :\n # flux uncertainty is the quadratic sum of the mean flux error\n # and the error of the mean\n dfbinval1 = np.std( fbin )/np.sqrt(len(fbin)-2)\n dfbinval2 = np.mean( dfbin )\n dfbinval = np.sqrt( dfbinval1**2 + dfbinval2**2 )\n else :\n # flux uncertainty is the std error of the mean\n dfbinval = np.std( fbin ) / np.sqrt(max(1,len(fbin)))\n fullbin = True\n\n if fullbin :\n wbinned.append( wbinval )\n fbinned.append( fbinval )\n dw.append( dwbinval )\n df.append( dfbinval )\n\n # start a new bin\n wbin,fbin,dfbin = np.array([]), np.array([]), np.array([])\n\n # add a new data point to the bin\n wbin = np.append( wbin, w[i] )\n fbin = np.append( fbin, f[i] )\n if len(fluxerr):\n dfbin = np.append( dfbin, fluxerr[i] )\n else : dfbin = np.append( dfbin, 0 )\n\n return( np.array( wbinned ), np.array(dw), np.array(fbinned), np.array(df) )", "def analyze(self, event):\n\n genjets = Collection(event, \"GenJet\")\n genparts = Collection(event, \"GenPart\")\n lheparts = Collection(event, \"LHEPart\")\n\n photons = []\n\n jets = []\n\n electrons = []\n\n muons = []\n\n for i in range(0,len(lheparts)):\n\n if lheparts[i].pdgId == 22:\n photons.append(i)\n\n if abs(lheparts[i].pdgId) == 11:\n muons.append(i)\n\n if abs(lheparts[i].pdgId) == 13:\n electrons.append(i)\n\n if abs(lheparts[i].pdgId) == 1 or abs(lheparts[i].pdgId) == 2 or abs(lheparts[i].pdgId) == 3 or abs(lheparts[i].pdgId) == 4 or abs(lheparts[i].pdgId) == 5 or abs(lheparts[i].pdgId) == 21:\n jets.append(i)\n\n assert(len(photons) == 1)\n \n assert((len(muons) == 0 and len(electrons) 
== 0) or (len(muons) == 2 and len(electrons) == 0) or (len(muons) == 0 and len(electrons) == 2)) \n\n assert(len(jets) == 2)\n\n if lheparts[jets[0]].pt < 30 or lheparts[jets[1]].pt < 30:\n return False\n\n if abs(lheparts[jets[0]].eta) > 4.7 or abs(lheparts[jets[1]].eta) > 4.7:\n return False\n\n if (lheparts[jets[0]].p4() + lheparts[jets[1]].p4()).M() < 500:\n return False\n\n if abs(lheparts[jets[0]].eta - lheparts[jets[1]].eta) < 2.5:\n return False\n\n if len(electrons) == 2:\n\n if lheparts[electrons[0]].pt < 25 or abs(lheparts[electrons[0]].eta) > 2.5:\n return False\n\n if lheparts[electrons[1]].pt < 25 or abs(lheparts[electrons[1]].eta) > 2.5:\n return False\n\n if (lheparts[electrons[0]].p4() + lheparts[electrons[1]].p4()).M() < 70 or (lheparts[electrons[0]].p4() + lheparts[electrons[1]].p4()).M() > 110:\n return False\n\n if deltaR(lheparts[photons[0]].eta,lheparts[photons[0]].phi,lheparts[electrons[0]].eta,lheparts[electrons[0]].phi) < 0.7:\n return False\n\n if deltaR(lheparts[photons[0]].eta,lheparts[photons[0]].phi,lheparts[electrons[1]].eta,lheparts[electrons[1]].phi) < 0.7:\n return False\n\n if deltaR(lheparts[jets[0]].eta,lheparts[jets[0]].phi,lheparts[electrons[0]].eta,lheparts[electrons[0]].phi) < 0.5:\n return False\n\n if deltaR(lheparts[jets[0]].eta,lheparts[jets[0]].phi,lheparts[electrons[1]].eta,lheparts[electrons[1]].phi) < 0.5:\n return False\n\n if deltaR(lheparts[jets[1]].eta,lheparts[jets[1]].phi,lheparts[electrons[0]].eta,lheparts[electrons[0]].phi) < 0.5:\n return False\n\n if deltaR(lheparts[jets[1]].eta,lheparts[jets[1]].phi,lheparts[electrons[1]].eta,lheparts[electrons[1]].phi) < 0.5:\n return False\n\n elif len(muons) == 2:\n\n if lheparts[muons[0]].pt < 20 or abs(lheparts[muons[0]].eta) > 2.4:\n return False\n\n if lheparts[muons[1]].pt < 20 or abs(lheparts[muons[1]].eta) > 2.4:\n return False\n\n if (lheparts[muons[0]].p4() + lheparts[muons[1]].p4()).M() < 70 or (lheparts[muons[0]].p4() + lheparts[muons[1]].p4()).M() > 110:\n return False\n\n if deltaR(lheparts[photons[0]].eta,lheparts[photons[0]].phi,lheparts[muons[0]].eta,lheparts[muons[0]].phi) < 0.7:\n return False\n\n if deltaR(lheparts[photons[0]].eta,lheparts[photons[0]].phi,lheparts[muons[1]].eta,lheparts[muons[1]].phi) < 0.7:\n return False\n\n if deltaR(lheparts[jets[0]].eta,lheparts[jets[0]].phi,lheparts[muons[0]].eta,lheparts[muons[0]].phi) < 0.5:\n return False\n\n if deltaR(lheparts[jets[0]].eta,lheparts[jets[0]].phi,lheparts[muons[1]].eta,lheparts[muons[1]].phi) < 0.5:\n return False\n\n if deltaR(lheparts[jets[1]].eta,lheparts[jets[1]].phi,lheparts[muons[0]].eta,lheparts[muons[0]].phi) < 0.5:\n return False\n\n if deltaR(lheparts[jets[1]].eta,lheparts[jets[1]].phi,lheparts[muons[1]].eta,lheparts[muons[1]].phi) < 0.5:\n return False\n else:\n return False\n\n if lheparts[photons[0]].pt < 25:\n return False\n\n if not ((abs(lheparts[photons[0]].eta) < 1.4442) or (1.566 < abs(lheparts[photons[0]].eta) and abs(lheparts[photons[0]].eta) < 2.5)):\n return False\n\n if deltaR(lheparts[jets[0]].eta,lheparts[jets[0]].phi,lheparts[jets[1]].eta,lheparts[jets[1]].phi) < 0.5:\n return False\n\n if deltaR(lheparts[jets[0]].eta,lheparts[jets[0]].phi,lheparts[photons[0]].eta,lheparts[photons[0]].phi) < 0.5:\n return False\n\n if deltaR(lheparts[jets[1]].eta,lheparts[jets[1]].phi,lheparts[photons[0]].eta,lheparts[photons[0]].phi) < 0.5:\n return False\n\n self.nselectedevents += 1\n\n return True", "def has_subroutines(otf: ttLib.TTFont) -> bool:\n table_tag = _sniff_cff_table_format(otf)\n top_dict = 
otf[table_tag].cff.topDictIndex[0]\n all_subrs = [top_dict.GlobalSubrs]\n if hasattr(top_dict, \"FDArray\"):\n all_subrs.extend(\n fd.Private.Subrs for fd in top_dict.FDArray if hasattr(fd.Private, \"Subrs\")\n )\n elif hasattr(top_dict.Private, \"Subrs\"):\n all_subrs.append(top_dict.Private.Subrs)\n return any(all_subrs)", "def testMoreEvents(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.singleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=1000,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 1)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(job.getFiles(type=\"lfn\"), [\"/some/file/name\"])\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)", "def multi_event(st,et,instrument_chosen,subevent):\r\n print('checking for multiple events within given time window')\r\n \r\n #creating file for time window with first events for all thresholds\r\n out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)\r\n\r\n #creating files for all second events for all thresholds\r\n new_files = two_in_one(out_name,et,subevent)\r\n \r\n #creating files for any third events for all thresholds that had a second event\r\n for file in new_files:\r\n two_in_one(file,et,subevent) \r\n \r\n return", "def determine_peaks_and_limits(\n data, smoothed, limits,\n peak_prom, peak_height,\n valley_prom, valley_height,\n debug, smooth_window_size, outfile,\n skip_smooth,\n):\n mm = max(smoothed)\n peaks, props = find_peaks(smoothed, height=peak_height, prominence=peak_prom) # maxima (peaks positions)\n rpeaks, rprops = find_peaks([-i+mm for i in smoothed], height=valley_height, prominence=valley_prom) # minima (peaks limits)\n\n if len(peaks) > 3 :\n print(\"WARNING: More than 3 peaks detected.\\nPossible erroneous detection:\\n\\t-Restart setting the -ll parameter.\\n\\t-check histogram and modify peak height and prominence arguments accordingly.\\n\\t-Contaminant peaks may also break detection, remove them with tools such as blobtools or by hard-filtering low coverage contigs.\")\n print(\"NOTE: Assuming the last 2 peaks are diploid and haploid...\")\n\n if debug :\n debug_plot_peak_errors(data, smoothed, peaks, limits.values(), rpeaks, smooth_window_size, outfile, skip_smooth)\n\n if len(peaks) > 0 :\n print(\"Peaks found: \" + \"x, \".join(str(p) for p in peaks) + \"x\")\n else :\n raise Exception(\"No peaks found! 
Try changing the input parameters or setting thresholds manually!\")\n if len(rpeaks) > 0 :\n print(\"Valleys found: \" + \"x, \".join(str(p) for p in rpeaks) + \"x\")\n else :\n print(\"No valleys found!\")\n\n valleys = [0] + list(rpeaks) + [len(smoothed)]\n thresholds = get_threshold_between_peaks(smoothed, peaks, valleys)\n\n relevant_peaks = peaks[-3:]\n #valleys = rpeaks[-3:]\n print(\"Relevant peaks: \" + \"x, \".join(str(p) for p in relevant_peaks) + \"x\")\n print(\"Thresholds:\\n\\t- \" + \"\\t- \".join(\"{}: {}x\\n\".format(k,p) for k,p in thresholds.items()))\n\n return relevant_peaks, thresholds", "def contains(self, x):\n if isinstance(x, list):\n x = np.array(x) # Promote list to array for contains check\n return ((x == 0) | (x == 1)).all() and self.low_limit <= np.count_nonzero(x) <= self.high_limit", "def inside(x,e,n):\n if x < e: return False\n if x > n-e-1: return False\n return True", "def v2v3event(particlelist, vn_event_sums,\n ptmin=0.2, ptmax=2.0, etacut=1.0,\n particletype=None):\n if len(particlelist) > 1:\n if type(etacut) is tuple:\n etamin = etacut[0]\n etamax = etacut[1]\n else:\n etamin = -etacut\n etamax = etacut\n\n # Apply pT, rapidity and charge cuts\n filtered_particles = [x for x in particlelist\n if (x.pt > ptmin and x.pt < ptmax\n and x.pseudorap > etamin\n and x.pseudorap < etamax\n and x.charge != 0)]\n\n ncharges = len(filtered_particles)\n # Several particles are required for proper analysis\n if ncharges < 2:\n return\n\n ptarray = numpy.array([x.pt for x in filtered_particles])\n phiarray = numpy.array([x.phi for x in filtered_particles])\n\n ptphisin2 = numpy.multiply(ptarray, numpy.sin(2 * phiarray))\n ptphicos2 = numpy.multiply(ptarray, numpy.cos(2 * phiarray))\n ptphisin3 = numpy.multiply(ptarray, numpy.sin(3 * phiarray))\n ptphicos3 = numpy.multiply(ptarray, numpy.cos(3 * phiarray))\n\n # exclude the analysed particle from event plane definition\n # to avoid autocorrelations\n sin2mean = numpy.subtract(ptphisin2.sum(), ptphisin2) / (ncharges - 1)\n cos2mean = numpy.subtract(ptphicos2.sum(), ptphicos2) / (ncharges - 1)\n sin3mean = numpy.subtract(ptphisin3.sum(), ptphisin3) / (ncharges - 1)\n cos3mean = numpy.subtract(ptphicos3.sum(), ptphicos3) / (ncharges - 1)\n\n # Subevent sums for resolution correction calculation\n sin2subsum = numpy.zeros(2)\n cos2subsum = numpy.zeros(2)\n sin3subsum = numpy.zeros(2)\n cos3subsum = numpy.zeros(2)\n for i in range(0, 2):\n sin2subsum[i] = numpy.sum(ptphisin2[i::2])\n cos2subsum[i] = numpy.sum(ptphicos2[i::2])\n sin3subsum[i] = numpy.sum(ptphisin3[i::2])\n cos3subsum[i] = numpy.sum(ptphicos3[i::2])\n\n # full event value\n psitwoarray = numpy.arctan2(sin2mean, cos2mean) / 2\n psithreearray = numpy.arctan2(sin3mean, cos3mean) / 3\n if particletype:\n typearray = numpy.array([1 if x.ptype == particletype else 0\n for x in filtered_particles])\n v2eventsum = numpy.sum(numpy.multiply(numpy.cos(2 * (phiarray - psitwoarray)), typearray))\n v3eventsum = numpy.sum(numpy.multiply(numpy.cos(3 * (phiarray - psithreearray)), typearray))\n ncharges = numpy.sum(typearray)\n\n if ncharges < 2:\n return\n else:\n v2eventsum = numpy.sum(numpy.cos(2 * (phiarray - psitwoarray)))\n v3eventsum = numpy.sum(numpy.cos(3 * (phiarray - psithreearray)))\n\n vn_event_sums[0] += v2eventsum / ncharges\n vn_event_sums[1] += (v2eventsum / ncharges)**2\n vn_event_sums[2] += v3eventsum / ncharges\n vn_event_sums[3] += (v3eventsum / ncharges)**2\n\n psi2sub = numpy.arctan2(sin2subsum, cos2subsum) / 2\n psi3sub = 
numpy.arctan2(sin3subsum, cos3subsum) / 3\n\n psi2term = math.cos(2 * (psi2sub[0] - psi2sub[1]))\n psi3term = math.cos(3 * (psi3sub[0] - psi3sub[1]))\n vn_event_sums[4] += psi2term\n vn_event_sums[5] += psi2term**2\n vn_event_sums[6] += psi3term\n vn_event_sums[7] += psi3term**2", "def DetectPulseOnset(self, asig, fs, wMS):\n # the percentage of the maximal value of the slope sum function\n # to detect the onset\n AmplitudeRatio = .01\n\n # low pass filter\n sig = self.zpIIR(asig, 3, .1, 20, 5 * 2/fs)\n wSmp = int(np.round(wMS*fs/1000))\n\n BlankWindowRatio = .9\n\n # delta x\n diffsig = np.diff(sig)\n\n z = np.empty((sig.size - 1 - wSmp, 1))\n z[:] = np.NaN\n\n # calculate slope sum function\n for i in range(wSmp,sig.size-1):\n subsig = diffsig[i-wSmp:i]\n z[i-wSmp] = np.sum(subsig[subsig>0])\n\n z0 = np.mean(z)\n onset = [0]\n tPnt = []\n zThres = 0\n blankWin = int(np.round(400*fs/1000))\n subIdx = np.r_[onset[0]: onset[0] + 4*blankWin + 1]\n MedianArrayWinSize = 5\n\n # this value controls the final acceptance\n PrcofMaxAMP = .2\n SSFAmpArray = np.ones((MedianArrayWinSize,1))*(np.max(z) - np.min(z)) * PrcofMaxAMP\n # the percentage of maximal amplitude for threshold crossing\n DetectionThreshold = .2\n SSFCrossThresholdArray = np.ones((MedianArrayWinSize,1))*z0*DetectionThreshold\n idx = 1\n\n # Keep loop going while onsets detected\n while(1):\n\n # look for the first location where z > z0\n try:\n\n # Look in z[subIdx] (and make sure it doesn't go past z's size)\n # find first index where z > the mean of z\n tempIndex = np.trim_zeros(subIdx*(z.size>subIdx), 'b')\n ix = np.amin(np.where(z[tempIndex] > z0)[0])\n except:\n break\n\n ix = tempIndex[ix]\n tPnt.append(ix)\n srcWin = np.r_[np.maximum(0,ix - wSmp): ix + wSmp]\n #if the window has passed the length of the data, then exit\n if srcWin[-1] >= len(z):\n break\n\n # This section of code is to remove the initial zero-region in the SSF function before looking for onset (if such region exists)\n zPnt = np.where(z[srcWin] == 0)\n\n if zPnt[0].size != 0:\n zPnt = srcWin[zPnt[0]]\n\n if np.any(zPnt < ix):\n srcWin = np.r_[zPnt[np.max(np.where(zPnt < ix))]: ix + wSmp]\n\n # accept the window\n if ( np.max(z[srcWin]) - np.min(z[srcWin]) > zThres):\n\n # calculate the threshold for next cycle\n SSFAmp = (np.max(z[srcWin]) - np.min(z[srcWin])) * PrcofMaxAMP\n SSFAmpArray[np.remainder(idx, MedianArrayWinSize)] = SSFAmp\n zThres = np.median(SSFAmpArray)\n SSFCrossThresholdArray[np.remainder(idx, MedianArrayWinSize)] = np.mean(z[srcWin])*DetectionThreshold\n z0 = np.median(SSFCrossThresholdArray)\n minSSF = np.min(z[srcWin]) + SSFAmp *AmplitudeRatio\n a = srcWin[0] + np.min(np.where(z[srcWin] >= minSSF))\n onset.append(a)\n\n # adaptively determine analysis window for next cycle\n bw = blankWin\n subIdx = np.round(np.r_[a + bw: a + 3*bw])\n idx = idx + 1\n\n else:\n # no beat detected\n subIdx = np.round(subIdx + blankWin)\n\n return onset", "def substract_given_gaussian(wavelength, spectrum, centre, peak=0, sigma=0, flux=0, search_peak=False, allow_absorptions = False,\n lowlow= 20, lowhigh=10, highlow=10, highhigh = 20, \n lmin=0, lmax=0, fmin=0, fmax=0, plot=True, fcal=False, verbose = True, warnings=True): \n do_it = False\n # Check that we have the numbers!\n if peak != 0 and sigma != 0 : do_it = True\n\n if peak == 0 and flux != 0 and sigma != 0:\n #flux = peak * sigma * np.sqrt(2*np.pi)\n peak = flux / (sigma * np.sqrt(2*np.pi))\n do_it = True \n\n if sigma == 0 and flux != 0 and peak != 0 :\n #flux = peak * sigma * np.sqrt(2*np.pi)\n 
sigma = flux / (peak * np.sqrt(2*np.pi)) \n do_it = True \n \n if flux == 0 and sigma != 0 and peak != 0 :\n flux = peak * sigma * np.sqrt(2*np.pi)\n do_it = True\n\n if sigma != 0 and search_peak == True: do_it = True \n\n if do_it == False:\n print(\"> Error! We need data to proceed! Give at least two of [peak, sigma, flux], or sigma and force peak to f[centre]\")\n s_s = spectrum\n else:\n # Setup wavelength limits\n if lmin == 0 :\n lmin = centre-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = centre+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((spectrum[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to centre\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > centre-lowlow and w_spec[i] < centre-lowhigh) or (w_spec[i] > centre+highlow and w_spec[i] < centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > centre-lowlow and w_spec[i] < centre-lowhigh) or (w_spec[i] > centre+highlow and w_spec[i] < centre+highhigh) ) \n \n # Linear Fit to continuum \n try: \n mm,bb = np.polyfit(w_cont, f_cont, 1)\n except Exception:\n bb = np.nanmedian(spectrum)\n mm = 0.\n if verbose or warnings: \n print(\" WARNING! Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n # c_cont = mm*np.array(w_cont)+bb \n # rms continuum\n # rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n if search_peak:\n # Search for index here w_spec(index) closest to line\n try:\n min_w = np.abs(np.array(w_spec)-centre)\n mini = np.nanmin(min_w)\n peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n flux = peak * sigma * np.sqrt(2*np.pi) \n if verbose: print(\" Using peak as f[\",np.round(centre,2),\"] = \",np.round(peak,2),\" and sigma = \", np.round(sigma,2), \" flux = \",np.round(flux,2))\n except Exception:\n if verbose or warnings: print(\" Error trying to get the peak as requested wavelength is \",np.round(centre,2),\"! Ignoring this fit!\")\n peak = 0.\n flux = -0.0001\n \n no_substract = False\n if flux < 0:\n if allow_absorptions == False:\n if np.isnan(centre) == False:\n if verbose or warnings : print(\" WARNING! This is an ABSORPTION Gaussian! 
As requested, this Gaussian is NOT substracted!\")\n no_substract = True\n if no_substract == False: \n if verbose: print(\" Substracting Gaussian at {:7.1f} with peak ={:10.4f} sigma ={:6.2f} and flux ={:9.4f}\".format(centre, peak,sigma,flux))\n \n gaussian_fit = gauss(w_spec, centre, peak, sigma)\n \n \n index=0\n s_s=np.zeros_like(spectrum)\n for wave in range(len(wavelength)):\n s_s[wave]=spectrum[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n if plot: \n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at line\n plt.axvline(x=centre, color='k', linestyle='-', alpha=0.8)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(centre+highlow, centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(centre-lowlow, centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical lines to emission line\n #plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n #plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n #plt.plot(w_spec, residuals, 'k')\n #plt.title('Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show() \n plt.close()\n \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,spectrum, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n else:\n s_s = spectrum\n return s_s", "def peaks(n, binCenters, method=\"JI\", window=100, peakAmpThresh=0.00005, valleyThresh=0.00003):\n data = zip(binCenters, n)\n binCenters = np.array(binCenters)\n firstCenter = (min(binCenters)+1.5*window)/window*window\n lastCenter = (max(binCenters)-window)/window*window\n if firstCenter < -1200: firstCenter = -1200\n if lastCenter > 3600: lastCenter = 3600\n\n\n if method == \"slope\" or method == \"hybrid\":\n peaks = {}\n peakInfo = peaksBySlope(n, binCenters, lookahead=20, delta=valleyThresh, averageHist=True)\n\n #find correspondences between peaks and valleys, and set valleys are left and right Indices\n #see the other method(s) for clarity!\n\n peakData = peakInfo[\"peaks\"]\n valleyData = peakInfo[\"valleys\"]\n\n #print len(peakData[0]), len(peakData[1])\n for i in xrange(len(peakData[0])):\n nearestIndex = findNearestIndex(valleyData[0], peakData[0][i])\n if valleyData[0][nearestIndex] < peakData[0][i]:\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][nearestIndex+1:]) == 0):\n rightIndex = findNearestIndex(binCenters, peakData[0][i]+window/2.0)\n else:\n offset = nearestIndex+1\n nearestIndex = 
offset+findNearestIndex(valleyData[0][offset:], peakData[0][i])\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n else:\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][:nearestIndex]) == 0):\n leftIndex = findNearestIndex(binCenters, peakData[0][i]-window/2.0)\n else:\n nearestIndex = findNearestIndex(valleyData[0][:nearestIndex], peakData[0][i])\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n\n pos = findNearestIndex(binCenters, peakData[0][i])\n #print binCenters[pos], peakData[1][i], binCenters[leftIndex], binCenters[rightIndex]\n peaks[pos] = [peakData[1][i], leftIndex, rightIndex]\n\n if method == \"hybrid\": slopePeaks = peaks\n \n if method == \"JI\" or method == \"ET\" or method == \"hybrid\":\n peaks = {}\n #Obtain max value per interval\n if method == \"JI\" or method == \"hybrid\":\n firstCenter = nearestJI(firstCenter)\n lastCenter = nearestJI(lastCenter)\n\n interval = firstCenter\n prevInterval = firstCenter-window\n #NOTE: All *intervals are in cents. *indices are of binCenters/n\n while interval < lastCenter:\n if method == \"ET\":\n leftIndex = findNearestIndex(binCenters, interval-window/2)\n rightIndex = findNearestIndex(binCenters, interval+window/2)\n interval += window\n elif method == \"JI\" or method == \"hybrid\":\n leftIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n prevInterval = interval\n interval = nextJI(interval)\n rightIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n peakPos = np.argmax(n[leftIndex:rightIndex])\n peakAmp = n[leftIndex+peakPos]\n peaks[leftIndex+peakPos] = [peakAmp, leftIndex, rightIndex]\n \n #print binCenters[leftIndex], binCenters[rightIndex], binCenters[leftIndex+peakPos], peakAmp\n #NOTE: All the indices (left/rightIndex, peakPos) are to be changed to represent respective cent \n #value corresponding to the bin. Right now, they are indices of respective binCenters in the array.\n \n if method == \"hybrid\":\n #Mix peaks from slope method and JI method.\n p1 = slopePeaks.keys()\n p2 = peaks.keys()\n allPeaks = {} #overwriting peaks dict\n for p in p1:\n nearIndex = findNearestIndex(p2, p)\n if abs(p-p2[nearIndex]) < window/2.0: p2.pop(nearIndex)\n \n for p in p1: allPeaks[p] = slopePeaks[p]\n for p in p2: allPeaks[p] = peaks[p]\n peaks = allPeaks\n\n #Filter the peaks and retain eligible peaks, also get their valley points.\n\n # ----> peakAmpThresh <---- : remove the peaks which are below that\n\n for pos in peaks.keys():\n #pos is an index in binCenters/n. DOES NOT refer to a cent value.\n if peaks[pos][0] < peakAmpThresh:\n #print \"peakAmp: \", binCenters[pos]\n peaks.pop(pos)\n\n #Check if either left or right valley is deeper than ----> valleyThresh <----.\n valleys = {}\n for pos in peaks.keys():\n leftLobe = n[peaks[pos][1]:pos]\n rightLobe = n[pos:peaks[pos][2]]\n #Sanity check: Is it a genuine peak? 
Size of distributions on either side of the peak should be comparable.\n if len(leftLobe) == 0 or len(rightLobe) == 0:\n continue\n if 1.0*len(leftLobe)/len(rightLobe) < 0.15 or 1.0*len(leftLobe)/len(rightLobe) > 6.67:\n #print \"size: \", binCenters[pos]\n #peaks.pop(pos)\n continue\n\n leftValleyPos = np.argmin(leftLobe)\n rightValleyPos = np.argmin(rightLobe)\n if (abs(leftLobe[leftValleyPos]-n[pos]) < valleyThresh and abs(rightLobe[rightValleyPos]-n[pos]) < valleyThresh):\n #print \"valley: \", binCenters[pos]\n peaks.pop(pos)\n else:\n valleys[peaks[pos][1]+leftValleyPos] = leftLobe[leftValleyPos]\n valleys[pos+rightValleyPos] = rightLobe[rightValleyPos]\n \n if len(peaks) > 0:\n temp1 = np.array(peaks.values())\n temp1 = temp1[:, 0]\n\n return {'peaks':[binCenters[peaks.keys()], temp1], 'valleys':[binCenters[valleys.keys()], valleys.values()]}\n else:\n return {'peaks':[[], []], 'valleys':[[], []]}", "def expression_peaks(cluster, magnitude, group1 = [ \"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\" ], group2 = [ \"FL\", \"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ]):\n if cluster.averaged == False:\n cluster.average_matrix(group1 + group2)\n verbalise(\"G\", cluster.sample_header)\n peaklist = {}\n\n for gene in range(cluster.genenumber):\n # for group 1:\n datalist = list(cluster.data_matrix[:,gene])\n maxexpression = max(datalist[:len(group1)])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient:\n if maxexpression >= magnitude + datalist[0]:\n # check adjacent peaks are not too big:\n # difference of 5.64 corresponds to 2% of the untransformed fpkm value\n # difference of 1.00 corresponds to 50% of the untransformed fpkm value\n if maxposn == len(group1) - 1:\n if (maxexpression - 5.64 < datalist[maxposn - 1] < maxexpression - 1):\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = group1[maxposn]\n\n # for group 2:\n maxexpression = max(datalist[len(group1):])\n maxposn = datalist.index(maxexpression)\n\n # check fold change is sufficient for reciprocal swap:\n if maxexpression >= magnitude * datalist[len(group1)]:\n # check adjacent peaks are not too big:\n try:\n if maxposn == len(group1+group2) - 1:\n if (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5):\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n\n elif (maxexpression * 0.02 < datalist[maxposn - 1] < maxexpression * 0.5) and \\\n (maxexpression * 0.02 < datalist[maxposn + 1] < maxexpression * 0.5):\n\n peaklist[cluster.gene_header[gene]] = (group1 + group2)[maxposn]\n except IndexError as inst:\n verbalise(\"R\", inst)\n verbalise(\"R\", datalist)\n verbalise(\"R\", \"Max is %.3f at position %d\" % (maxexpression, maxposn))\n\n verbalise(\"G\", len(peaklist), \"significant peaks found.\")\n return peaklist", "def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough change\n Checker(a,b,n-1) #check the next point in the channel", 
"def checkCorrectLumisEventGEN(dataset):\n numlumis = dbs3Client.getLumiCountDataSet(dataset)\n numEvents = dbs3Client.getEventCountDataSet(dataset)\n # numEvents / numLumis >= 300\n if numlumis >= numEvents / 300.0:\n return True\n else:\n return False", "def _check_preceding_time_interval_threshold(data, index, time_window, threshold, min_count):\n\n\t# define the start slice (note that we look backwards here)\n\tstart_slice = index - time_window\n\t# define the end slice, since python does not include the item defined in the end slice, we do not have to subtract -1. For example, 100:120 does not include 120\n\tend_slice = index\n\n\t# if the start slice is negative, then we set it to 0 since there are no values with indexes lower than 0\n\tif start_slice < 0:\n\t\t# set start slice to zero to indicate the beginning of the list\n\t\tstart_slice = 0\n\t\n\t# return True or False if the window contains more than the min_count\n\treturn ((data[start_slice:end_slice] > threshold).sum()) >= min_count", "def reduce_peaks(self,peaks,odf_min):\n if len(peaks)==0:\n return -1 \n if odf_min<self.iso_thr*peaks[0]:\n #remove small peaks\n ismallp=np.where(peaks<self.peak_thr*peaks[0])\n if len(ismallp[0])>0:\n l=ismallp[0][0]\n else:\n l=len(peaks)\n else:\n return -1\n return l", "def is_yeast_frame(frame):\n frame = frame.astype(np.float)[20:]\n frame = frame - frame.min()\n frame = frame / frame.max()\n\n E = convolve(frame, np.ones((21, 21)) / (21 ** 2), mode='mirror')\n var = convolve((frame - E) ** 2, np.ones((21, 21)) / (21 ** 2), mode='mirror')\n maxvar = var.max()\n minvar = var.mean()\n return maxvar > 2 * minvar", "def detect_peaks(x_data, y_data, imx, sigmamv=.25, fig=400, period=1e-3, model='one_ele'):\n thr = .4\n thr2 = .6\n\n # chop off part of the data, because T1 is relatively long\n mvedge = .1 * (np.max(x_data) - np.min(x_data))\n if model == 'two_ele':\n mvthr = (np.max(x_data) - np.min(x_data)) * .25e-3 / period # T1 \\approx .1 ms [Ref]\n horz_vals = x_data[(x_data > (np.min(x_data) + np.maximum(mvthr, mvedge)))\n & (x_data < (np.max(x_data) - mvedge))]\n z_data = imx[:, (x_data > (np.min(x_data) + np.maximum(mvthr, mvedge))) & (x_data < (np.max(x_data) - mvedge))]\n elif model == 'one_ele':\n horz_vals = x_data[(x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]\n z_data = imx[:, (x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]\n else:\n raise Exception('no such model')\n\n scalefac = (np.max(horz_vals) - np.min(horz_vals)) / (z_data.shape[1] - 1) # mV/pixel\n\n # smooth input image\n kern = scipy.signal.gaussian(71, std=sigmamv / scalefac)\n kern = kern / kern.sum()\n imx2 = scipy.ndimage.convolve(z_data, kern.reshape((1, -1)), mode='nearest')\n\n # get maximum value for each row\n mm1 = np.argmax(imx2, axis=1)\n val = imx2[np.arange(0, imx2.shape[0]), mm1]\n\n idx1 = np.where(np.abs(val) > thr)[0] # only select indices above scaled threshold\n\n xx1 = np.vstack((horz_vals[mm1[idx1]], y_data[idx1])) # position of selected points\n\n # get minimum value for each row\n mm2 = np.argmin(imx2, axis=1)\n val = imx2[np.arange(0, imx2.shape[0]), mm2]\n # remove points below threshold\n idx2 = np.where(np.abs(val) > thr)[0]\n\n xx2 = np.vstack((horz_vals[mm2[idx2]], y_data[idx2]))\n\n # join the two sets\n detected_peaks = np.hstack((xx1, xx2))\n\n # determine weights for the points\n qq = np.intersect1d(idx1, idx2)\n q1 = np.searchsorted(idx1, qq)\n q2 = np.searchsorted(idx2, qq)\n w1 = .5 * np.ones(len(idx1))\n w1[q1] = 1\n w2 = 
.5 * np.ones(len(idx2))\n w2[q2] = 1\n\n wfac = .1\n w1[np.abs(val[idx1]) < thr2] = wfac\n w2[np.abs(val[idx2]) < thr2] = wfac\n weights = np.hstack((w1, w2))\n\n if fig is not None:\n plt.figure(fig)\n plt.clf()\n plt.pcolormesh(x_data, y_data, imx, shading='auto')\n plt.plot(horz_vals[mm1[idx1]], y_data[idx1], '.b', markersize=14, label='idx1')\n plt.plot(horz_vals[mm2[idx2]], y_data[idx2], '.r', markersize=14, label='idx2')\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n\n return detected_peaks, {'weights': weights, 'detected_peaks': detected_peaks}", "def _detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False):\n\n x = np.atleast_1d(x).astype('float64')\n if x.size < 3:\n return np.array([], dtype=int)\n if valley:\n x = -x\n # find indices of all peaks\n dx = x[1:] - x[:-1]\n # handle NaN's\n indnan = np.where(np.isnan(x))[0]\n if indnan.size:\n x[indnan] = np.inf\n dx[np.where(np.isnan(dx))[0]] = np.inf\n ine, ire, ife = np.array([[], [], []], dtype=int)\n if not edge:\n ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]\n else:\n if edge.lower() in ['rising', 'both']:\n ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]\n if edge.lower() in ['falling', 'both']:\n ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]\n ind = np.unique(np.hstack((ine, ire, ife)))\n # handle NaN's\n if ind.size and indnan.size:\n # NaN's and values close to NaN's cannot be peaks\n ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]\n # first and last values of x cannot be peaks\n if ind.size and ind[0] == 0:\n ind = ind[1:]\n if ind.size and ind[-1] == x.size-1:\n ind = ind[:-1]\n # remove peaks < minimum peak height\n if ind.size and mph is not None:\n ind = ind[x[ind] >= mph]\n # remove peaks - neighbors < threshold\n if ind.size and threshold > 0:\n dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)\n ind = np.delete(ind, np.where(dx < threshold)[0])\n # detect small peaks closer than minimum peak distance\n if ind.size and mpd > 1:\n ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height\n idel = np.zeros(ind.size, dtype=bool)\n for i in range(ind.size):\n if not idel[i]:\n # keep peaks with the same height if kpsh is True\n idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \\\n & (x[ind[i]] > x[ind] if kpsh else True)\n idel[i] = 0 # Keep current peak\n # remove the small peaks and sort back the indices by their occurrence\n ind = np.sort(ind[~idel])\n\n return ind", "def evaluation_metrics(pred_peaks, true_peaks, fecg_fs: int = 1000, accept_window: int = 10):\n tp = 0\n fp = 0\n for pred_peak in pred_peaks:\n peak_diff = true_peaks - pred_peak\n if np.min(np.abs(peak_diff)) > accept_window * 1000 / fecg_fs:\n fp += 1\n else:\n tp += 1\n fn = len(true_peaks) - tp\n\n tpr = tp / (tp + fn)\n ppv = tp / (tp + fp)\n f1 = 2 * (tpr * ppv) / (tpr + ppv)\n acc = tp / (tp + fn + fp)\n # print(tp, fp, fn)\n return tpr, ppv, acc, f1", "def get_following_peak_multi_channel(ind_spike, sigs, sign, method = 'biggest_amplitude'):\n \n multi_peaks =[ ]\n amplitudes = [ ]\n for c, sig in enumerate(sigs):\n multi_peaks.append(get_following_peak(ind_spike, sig, sign))\n multi_peaks = np.array(multi_peaks)\n \n ind_peaks = -np.ones(ind_spike.size, dtype = 'i')\n for i, ind in enumerate(ind_spike):\n if method == 'closer':\n ind_peaks = multi_peak[:,i].min()\n elif method == 'biggest_amplitude':\n if np.all(multi_peaks[:,i] == -1):\n ind_peaks[i] 
= -1\n continue\n \n peak_values = [ ]\n for c, sig in enumerate(sigs):\n if multi_peaks[c,i] != -1:\n peak_values.append(sig[multi_peaks[c,i]])\n else:\n peak_values.append(0)\n \n if sign == '+':\n biggest = np.argmax(peak_values)\n elif sign == '-':\n biggest = np.argmin(peak_values)\n ind_peaks[i] = multi_peaks[biggest,i]\n \n \n return ind_peaks+1", "def __get_hairiness(self, ll_list):\n for j in xrange(self.convergence_window, len(ll_list)):\n\t\n match=0\n for i in xrange(self.convergence_window): \n # Test if likelihood is oscillating\n if (ll_list[j-i] > ll_list[j-i+1] and ll_list[j-i] > ll_list[j-i-1]) \\\n or (ll_list[j-i] < ll_list[j-i-1] and ll_list[j-i] < ll_list[j-i+1]) : \n match += 1 \n \n if match >= self.convergence_threshold:\n return True\t\n return False", "def test_overlap_plugin(input_peaks, split_i):\n chunks = np.split(input_peaks, [split_i])\n chunks = [c for c in chunks if not len(c) == 0]\n\n class Peaks(strax.Plugin):\n depends_on = tuple()\n dtype = strax.interval_dtype\n\n def compute(self, chunk_i):\n data = chunks[chunk_i]\n return self.chunk(\n data=data,\n start=int(data[0]['time']),\n end=int(strax.endtime(data[-1])))\n\n # Hack to make peak output stop after a few chunks\n def is_ready(self, chunk_i):\n return chunk_i < len(chunks)\n\n def source_finished(self):\n return True\n\n window = 10\n\n # Note we must apply this to endtime, not time, since\n # peaks straddling the overlap threshold are assigned to the NEXT window.\n # If we used time it would fail on examples with peaks larger than window.\n # In real life, the window should simply be chosen large enough that this\n # is not an issue.\n def count_in_window(ts, w=window):\n # Terribly inefficient algorithm...\n result = np.zeros(len(ts), dtype=np.int16)\n for i, t in enumerate(ts):\n result[i] = ((ts < t + w) & (ts > t - w)).sum()\n return result\n\n class WithinWindow(strax.OverlapWindowPlugin):\n depends_on = ('peaks',)\n dtype = [('n_within_window', np.int16)] + strax.time_fields\n\n def get_window_size(self):\n return window\n\n def compute(self, peaks):\n return dict(\n n_within_window=count_in_window(strax.endtime(peaks)),\n time=peaks['time'][:1],\n endtime=strax.endtime(peaks)[-1:])\n\n st = strax.Context(storage=[])\n st.register(Peaks)\n st.register(WithinWindow)\n\n result = st.get_array(run_id='some_run', targets='within_window')\n expected = count_in_window(strax.endtime(input_peaks))\n\n assert len(expected) == len(input_peaks), \"WTF??\"\n assert isinstance(result, np.ndarray), \"Did not get an array\"\n assert len(result) == len(expected), \"Result has wrong length\"\n np.testing.assert_equal(result['n_within_window'], expected,\n \"Counting went wrong\")", "def triggered_atom_is_max_disp(path_to_data_dir, event):\n\tprint \"event:\", event[0],event[1][0],event[1][1],event[1][2]\n\t\n\tif 'test' in event[0]:\n\t\ttest_id = int(event[0][4:])\n\telse:\n\t\ttest_id = int(event[0])\n\t\n\tpath_to_test_dir = data_dir_to_test_dir(path_to_data_dir, test_id)\n\t\n\t#path_to_test_dir = path_to_data_dir + event[0]\n\t\n\ttriggered_atom_index = read_from_art_input_file(path_to_test_dir)\n\t\n\tinit, sad, fin = event[1][0],event[1][1],event[1][2]\n\t\n\tpath_to_event = path_to_test_dir + \"/results/event_\" + init + \"_\" + sad + \"_\" + fin\n\t\n\tpath_to_init_sad = path_to_event + \"/init_sad\"\n\t\n\tpath_to_displacement = path_to_init_sad + \"/displacement_results_dict.pkl\"\n\t\n\tif os.path.exists(path_to_displacement):\n\t\tprint \"path to displacement:\", 
path_to_displacement\n\t\tevent_disp = pickle.load(open(path_to_displacement,'r'))\n\t\tindex_max_disp = max(event_disp.iteritems(), key=operator.itemgetter(1))[0]\n\t\tprint \"max displacement atom index:\", index_max_disp\n\t\tif len(triggered_atom_index) == 1:\n\t\t\tif triggered_atom_index[0] == index_max_disp:\n\t\t\t\tprint \"True\"\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tprint \"False\"\n\t\t\t\treturn False\n\t\telse:\n\t\t\tprint \"multiple triggering atoms exists!\"\t\n\telse:\n\t\tprint(\"no displacement data has been calculated in current event\")", "def test_find_peak_near_frequency(tmpdir, debug=False):\n\n fft_freqs = [0, 1e9, 2e9, 3e9, 4e9, 5e9, 6e9, 7e9, 8e9]\n fft_mx = [1, 4, 3, 2, 1.5, 1.3, 2.5, 1.1, 1.0]\n fft_my = [1, 4, 3, 2, 1.5, 1.3, 1.1, 1.7, 1.0]\n fft_mz = [2, 1, 1, 1, 1, 1, 1, 1, 1.5]\n\n if debug == True:\n # Plot the arrays for debugging\n os.chdir(str(tmpdir))\n fig = plt.figure()\n ax = fig.gca()\n ax.plot(fft_freqs, fft_mx, label='fft_mx')\n ax.plot(fft_freqs, fft_my, label='fft_my')\n ax.plot(fft_freqs, fft_mz, label='fft_mz')\n ax.legend()\n fig.savefig('fft_vals.png')\n\n assert find_peak_near_frequency(1.5e9, fft_freqs, fft_mx) == (1e9, 1)\n #assert find_peak_near_frequency(1.5e9, fft_freqs, [fft_mx, fft_my]) == (1, 1e9)\n assert find_peak_near_frequency(5e9, fft_freqs, fft_mx) == (6e9, 6)\n assert find_peak_near_frequency(5e9, fft_freqs, fft_my) == (7e9, 7)\n assert find_peak_near_frequency(3.7e9, fft_freqs, fft_mx) == (6e9, 6)\n # assert find_peak_near_frequency(4e9, fft_freqs, [fft_mx, fft_my]) ==\n # None # no simultaneous peak\n\n # Just to check special cases, boundary cases etc.\n assert find_peak_near_frequency(1e9, fft_freqs, fft_mx) == (1e9, 1)\n assert find_peak_near_frequency(0.9e9, fft_freqs, fft_mx) == (1e9, 1)\n assert find_peak_near_frequency(1.1e9, fft_freqs, fft_mx) == (1e9, 1)\n assert find_peak_near_frequency(-0.1e9, fft_freqs, fft_mx) == (1e9, 1)\n assert find_peak_near_frequency(20e9, fft_freqs, fft_mx) == (6e9, 6)\n\n assert find_peak_near_frequency(-0.5e9, fft_freqs, fft_mz) == (0e9, 0)\n assert find_peak_near_frequency(0.5e9, fft_freqs, fft_mz) == (0e9, 0)\n assert find_peak_near_frequency(1e9, fft_freqs, fft_mz) == (0e9, 0)\n assert find_peak_near_frequency(6e9, fft_freqs, fft_mz) == (8e9, 8)\n assert find_peak_near_frequency(8e9, fft_freqs, fft_mz) == (8e9, 8)\n assert find_peak_near_frequency(9e9, fft_freqs, fft_mz) == (8e9, 8)\n\n with pytest.raises(ValueError):\n # An error should be raised if fft_vals doesn't have the same\n # length as fft_freqs.\n find_peak_near_frequency(2.5e9, fft_freqs, fft_vals=[0, 1])", "def close_to_exceeding(self) -> bool:\n mean = self.current / self.num_cuts\n if self.max_frames is not None:\n return self.current + mean > self.max_frames\n if self.max_samples is not None:\n return self.current + mean > self.max_samples\n if self.max_duration is not None:\n return self.current + mean > self.max_duration\n return False", "def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True", "def analyze_wfs(self, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001, compact=True):\n\n print(\"---------------------------------\")\n print(\"Analyzing waveforms to get maxima\")\n print(\"---------------------------------\")\n\n # Creo una progress bar per rendere piu' fruibile visivamente il programma\n bar = progressbar.ProgressBar(maxval=self.number_of_events,\n 
widgets=[progressbar.Bar(\"=\", \"[\", \"]\"), \" \", progressbar.Percentage()])\n bar.start()\n counter = 0\n # Ora faccio un loop sugli eventi..\n if compact:\n for event in range(0, len(self.table_sipm_time['ev']), 9):\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_dataframe = self.analyze_ev_wf_compact(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat(\n [self.wf_peaks, peaks_dataframe], ignore_index=True)\n bar.update(counter+1)\n counter += 9\n else:\n for event in self.table_sipm_time['ev']:\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_time, peaks_ampl = self.analyze_ev_wf(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat([self.wf_peaks, pd.DataFrame(\n {'t': peaks_time, 'A': peaks_ampl})], ignore_index=True)\n bar.update(counter+1)\n counter += 1\n\n bar.finish()\n print(\"Events: \"+str(len(self.table_sipm_time['ev'])))\n print(\"---------------------------------\")\n print(\"Waveform analysis completed!\")\n # Devo ora ricavare di nuovo i Dt dai tempi assoluti, utilizzando la funzione diff()..\n self.wf_peaks['dt'] = self.wf_peaks['t'].diff()\n # ..e scartando il primo valore (che non ha un Dt)\n self.wf_peaks = self.wf_peaks.iloc[1:]\n print('Found {:d} peaks in waveforms\\n'.format(len(self.wf_peaks)))", "def _check_following_time_interval_threshold(data, index, time_window, threshold, min_count):\n\n\t# define the start slice\n\tstart_slice = index + 1\n\t# define the end slice, it will be the start slice plus or minus (depending on the operator) the time windows\n\tend_slice = start_slice + time_window\n\n\t# return True or False if the window contains more than the min_count\n\treturn ((data[start_slice:end_slice] > threshold).sum()) >= min_count", "def many_events(start_time,end_time,subevent_bools):\r\n \r\n #running through for each event\r\n for j in range(len(start_time)):\r\n \r\n #start, end, and subevent bool for this event\r\n st = start_time[j]\r\n et = end_time[j]\r\n subevent = bool(subevent_bools[j])\r\n \r\n #checking if start time is actually available\r\n if str(st) != 'nan':\r\n try:\r\n st = parse(st)\r\n yes_st = True\r\n except ValueError:\r\n yes_st = False\r\n else:\r\n yes_st = False\r\n \r\n #checking if end time is actually available\r\n if str(et) != 'nan':\r\n try:\r\n et = parse(et)\r\n yes_et = True\r\n except ValueError:\r\n yes_et = False\r\n else:\r\n yes_et = False\r\n \r\n #if both start and end times are available, running the code\r\n if yes_st and yes_et:\r\n #event must be after Nov. 2010 because currently no capability for\r\n #instruments in use before then - change this if you have that\r\n #capability\r\n if st > datetime(2010,9,1):\r\n try:\r\n print('got start and end times! 
running database extraction') \r\n database_extraction(st,et,instrument_chosen,subevent)\r\n except:\r\n continue\r\n else:\r\n print('cannot run for events before November 2010 because do not have '\r\n 'access to instruments before then')", "def identify_flux(xyz: list) -> list:\n flagged_lines = []\n\n for line in xyz:\n *orig,dollar_amount,pct_amount = line\n if abs(dollar_amount) > THRESHOLDS[0] and abs(pct_amount) > THRESHOLDS[1]:\n flagged_lines.append(line)\n\n\n\n\n return flagged_lines", "def get_following_peak(ind_spike, sig, sign):\n sig1 = sig[:-2]\n sig2 = sig[1:-1]\n sig3 = sig[2:]\n if sign == '+':\n all_peaks, = np.where(numexpr.evaluate( '(sig1<=sig2) & ( sig2>sig3)'))\n elif sign == '-':\n all_peaks, = np.where(numexpr.evaluate( '(sig1>=sig2) & ( sig2<sig3)'))\n \n ind_peaks = -np.ones(ind_spike.size, dtype = 'i')\n for i, ind in enumerate(ind_spike):\n possible = all_peaks[all_peaks>ind]\n if possible.size>0:\n ind_peaks[i] = possible[0]\n \n return ind_peaks", "def isAtomAssigned(atom, toPeaks=False):\n\n atomSet = atom.atomSet\n if atomSet and atomSet.resonanceSets:\n if toPeaks:\n for resonanceSet in atomSet.resonanceSets:\n for resonance in resonanceSet.resonances:\n if resonance.peakDimContribs:\n return True\n else:\n return True\n \n return False", "def has_neighbor(peak, peak_list, min_dist):\n for testpeak in peak_list:\n if (distance.euclidean(peak, testpeak) < min_dist):\n return True\n return False", "def get_steps_between_peaks(self):\n max_x, max_y = self.get_local_maxes()\n full_steps = np.ediff1d(max_x)\n # _full_mean, _full_std = np.mean(full_steps), np.std(full_steps)\n _full_count = len(full_steps)\n\n unique_steps_between_peaks, unique_steps_counts = np.unique(full_steps, return_counts=True)\n\n _filter = np.logical_and(full_steps < unique_steps_between_peaks[np.argmax(unique_steps_counts)] * 1.7,\n full_steps > unique_steps_between_peaks[np.argmax(unique_steps_counts)] * 0.3)\n # 1.7 chosen as filter, as there seems to be another peak ~2* (probably due to single missed peaks)\n # 1.7 avoids the start of the gaussian at 2*\n\n if not _filter.all():\n steps = full_steps[_filter]\n # print(unique_steps_between_peaks[np.argmax(unique_steps_counts)])\n _filtered_count = len(steps)\n _counts = (_full_count, _filtered_count, _full_count - _filtered_count)\n # print('Original Count: %s, Filtered Count: %s, Excluded Count: %s' % _counts)\n # print('Filtered:', full_steps[np.invert(_filter)])\n unique_steps_between_peaks, unique_steps_counts = np.unique(steps, return_counts=True)\n else:\n steps = full_steps\n\n return steps, unique_steps_between_peaks, unique_steps_counts", "def peakRecognition(y, sg_window, threshold):\n\n corrected_sg2 = savgol_filter(\n y, window_length=sg_window, polyorder=3, deriv=2)\n\n peaks_all = []\n\n for row in corrected_sg2:\n peaks = argrelmin(row)[0]\n peaks = [peak for peak in peaks if row[peak] < -threshold] # Remove peaks below threshold\n\n # Combine peaks w/o positive 2nd derivative between them\n peak_condensing = []\n peaks_condensed = []\n for j in range(len(row)):\n if j in peaks:\n peak_condensing.append(j)\n if row[j] > 0 and len(peak_condensing) > 0:\n peaks_condensed.append(int(np.mean(peak_condensing)))\n peak_condensing = []\n if len(peak_condensing) > 0:\n peaks_condensed.append(int(np.mean(peak_condensing)))\n\n peaks_all.append(peaks_condensed)\n bar3.update(bar3.value + 1)\n\n return peaks_all", "def can_attend_meetings(intervals: List[List[int]]) -> bool:\n intervals.sort()\n for i in range(1, 
len(intervals)):\n if intervals[i][0] < intervals[i - 1][1]:\n return False\n return True", "def clean(data, N_peaks, f_interval=None, f_resolution=None, sampling=None, w_column=None):\n print('-------------------------- clean')\n \n # Avoid overwritting data:\n data0 = data.copy()\n\n # Standard frequency resolution:\n T = data0[-1,0]-data[0,0]\n if f_resolution==None:\n f_resolution = 1/T\n \n # Avoid 0 as input as not peaks are found:\n if f_interval[0]==0:\n f_interval = [f_resolution, f_interval[1]]\n \n # Constants:\n SAMPLING = 1\n f_RES = 0.1*f_resolution # Standard frequency resolution\n picon = 2*np.pi*data0[:,0] # Optimization constant\n f_peaks = np.zeros(N_peaks)\n A_peaks = np.zeros(N_peaks)\n \n for i in range(N_peaks):\n k = i+1\n print '%s. Peak' %k\n\n # 1. Iteration - start finding largest peak:\n Pf_power, _, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1]) # Smaller f_int (Tuple instead of array for optimization)\n\n # Testing that the frequency resolution > sigma_f to continue:\n A_peak = P[j]\n A_av = np.mean(np.sqrt(P))\n sigma_a = 0.8*A_av\n sigma_phi = sigma_a/A_peak\n sigma_f = np.sqrt(3)*sigma_phi/(np.pi*T)\n if f_RES>sigma_f: \n \n # 2. Iteration: uses now f_res and so on..\n Pf_power, _, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1])\n \n # 3. Iteration: last\n Pf_power, P_comp, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n fpicon = picon*f[j] # Optimization constant\n alpha = P_comp[:,0]; beta = P_comp[:,1]\n alpha0 = alpha[j]*np.sin(fpicon)\n beta0 = beta[j]* np.cos(fpicon)\n data0[:,1] = data0[:,1] - alpha0 - beta0\n f_peaks[i] = f[j]\n A_peaks[i] = np.sqrt(P[j])\n\n # Output:\n St_clean = data0\n print f_peaks, A_peaks\n return St_clean, f_peaks, A_peaks", "def maximums(self):\n # The maximums are at the peaks position but might be swallowed by \n # other peaks\n maximums = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n val = func(pos, pos, height, width)\n if val >= self.__call__(pos, count=False)[0]:\n maximums.append((val, pos))\n return sorted(maximums, reverse=True)", "def find_peaks(self, t_measure):\n self._check_time(t_measure)\n #widths = np.arange(2,7) # range of widths to check by find_peaks_cwt\n #peak_nodes = find_peaks_cwt(self.get_velocities(t_measure), widths, min_snr=2.0,noise_perc=30.0)\n peak_beads = peakutils.peak.indexes(self.get_velocities(t_measure), thres=0.75, min_dist=7)\n return peak_beads", "def _is_out_of_range(self, signal, y_range, threshold):\n out_of_range = [s for s in signal if s < y_range.min or s > y_range.max]\n out_of_range_percentage = len(out_of_range) / len(signal)\n\n return out_of_range_percentage > threshold", "def summarize(self, data, order=11, verbose=False):\n self.intervals = np.diff(self.timebase[self.onsets]) # event intervals\n i_decay_pts = int(2*self.taus[1]/self.dt) # decay window time (points)\n self.peaks = []\n self.smpkindex = []\n self.smoothed_peaks = []\n self.amplitudes = []\n self.Qtotal = []\n self.averaged = False # set flags in case of no events found\n self.individual_events = False\n self.fitted = False\n self.fitted_tau1 = np.nan\n self.fitted_tau2 = np.nan\n self.Amplitude = np.nan\n self.avg_fiterr = np.nan\n ndata = 
len(data)\n avgwin = 5 # int(1.0/self.dt) # 5 point moving average window for peak detection\n# print('dt: ', self.dt)\n mwin = int((0.050)/self.dt)\n# print('mwin: ', mwin)\n #order = int(0.0004/self.dt)\n # print('onsets: ', self.onsets)\n if self.sign > 0:\n nparg = np.greater\n else:\n nparg = np.less\n if len(self.onsets) > 0: # original events\n# print('no: ', len(self.onsets))\n acceptlist = []\n for j in range(len(data[self.onsets])):\n if self.sign > 0 and self.eventstartthr is not None:\n if self.data[self.onsets[j]] < self.eventstartthr:\n continue\n if self.sign < 0 and self.eventstartthr is not None:\n if self.data[self.onsets[j]] > -self.eventstartthr:\n continue\n svwinlen = data[self.onsets[j]:(self.onsets[j]+mwin)].shape[0]\n if svwinlen > 11:\n svn = 11\n else:\n svn = svwinlen\n if svn % 2 == 0: # if even, decrease by 1 point to meet ood requirement for savgol_filter\n svn -=1\n \n if svn > 3: # go ahead and filter\n p = scipy.signal.argrelextrema(scipy.signal.savgol_filter(data[self.onsets[j]:(self.onsets[j]+mwin)], svn, 2), nparg, order=order)[0]\n else: # skip filtering\n p = scipy.signal.argrelextrema(data[self.onsets[j]:(self.onsets[j]+mwin)], nparg, order=order)[0]\n if len(p) > 0:\n self.peaks.extend([int(p[0]+self.onsets[j])])\n amp = self.sign*(self.data[self.peaks[-1]] - data[self.onsets[j]])\n\n self.amplitudes.extend([amp])\n i_end = i_decay_pts + self.onsets[j] # distance from peak to end\n i_end = min(ndata, i_end) # keep within the array limits\n if j < len(self.onsets)-1:\n if i_end > self.onsets[j+1]:\n i_end = self.onsets[j+1]-1 # only go to next event start\n move_avg, n = moving_average(data[self.onsets[j]:i_end], n=min(avgwin, len(data[self.onsets[j]:i_end])))\n if self.sign > 0:\n pk = np.argmax(move_avg) # find peak of smoothed data\n else:\n pk = np.argmin(move_avg)\n self.smoothed_peaks.extend([move_avg[pk]]) # smoothed peak\n self.smpkindex.extend([self.onsets[j]+pk])\n acceptlist.append(j)\n if len(acceptlist) < len(self.onsets):\n if verbose:\n print('Trimmed %d events' % (len(self.onsets)-len(acceptlist)))\n self.onsets = self.onsets[acceptlist] # trim to only the accepted values\n # print(self.onsets)\n self.avgevent, self.avgeventtb, self.allevents = self.average_events(self.onsets) \n if self.averaged:\n self.fit_average_event(self.avgeventtb, self.avgevent, debug=False)\n \n else:\n if verbose:\n print('No events found')\n return", "def early_stop(val_loss):\n\n assert isinstance(val_loss, list)\n\n if val_loss[-1] > val_loss[-2] > val_loss[-3] > val_loss[-4] > val_loss[-5] > val_loss[-6]:\n return True\n else:\n return False", "def check_gaus_fit(hist):\n s = ROOT.TSpectrum(1)\n s.Search(hist, 1, \"new\")\n peaks_buff = s.GetPositionX()\n x_peak = peaks_buff[0]\n\n return (abs(hist.GetFunction('gaus').GetParameter(1) - x_peak) / abs(x_peak)) < 0.1", "def isSubset(self, other):\n for val, freq in self.items():\n if freq > other.freq(val):\n return False\n return True", "def peak_to_subpeak_list(chrom,start,end):\n num_subpeaks = int(end) - int(start) // 60\n start_list = list(range(start,end,60))\n end_list = start_list[1:] \n end_list.append(start_list[-1] + 60)\n subpeak_lists = [(chrom,s,e) for s,e in zip(start_list,end_list)]\n return subpeak_lists", "def containsManyPos(self, aerial_pos_list):\n # Get boundary points\n ordered_pts = self.boundary_pts.order_by('order')\n path_pts = [[wpt.position.gps_position.latitude,\n wpt.position.gps_position.longitude]\n for wpt in ordered_pts]\n # First check enough points to define a polygon\n 
if len(path_pts) < 3:\n return [False] * len(aerial_pos_list)\n\n # Create path to use for testing polygon inclusion\n path_pts.append(path_pts[0])\n path = mplpath.Path(np.array(path_pts))\n\n # Test each aerial position for altitude\n results = list()\n for aerial_pos in aerial_pos_list:\n # Check altitude bounds\n alt = aerial_pos.altitude_msl\n altitude_check = (alt <= self.altitude_msl_max\n and alt >= self.altitude_msl_min)\n results.append(altitude_check)\n\n # Create a list of positions to test whether inside polygon\n polygon_test_point_ids = [cur_id\n for cur_id in range(len(aerial_pos_list))\n if results[cur_id]]\n if len(polygon_test_point_ids) == 0:\n return results\n polygon_test_points = [[aerial_pos_list[cur_id].gps_position.latitude,\n aerial_pos_list[cur_id].gps_position.longitude]\n for cur_id in polygon_test_point_ids]\n\n # Test each point for inside polygon\n polygon_test_results = path.contains_points(\n np.array(polygon_test_points))\n for test_id in range(len(polygon_test_point_ids)):\n cur_id = polygon_test_point_ids[test_id]\n results[cur_id] = (polygon_test_results[test_id] == True)\n\n return results", "def is_peak_hours(time):\n if not 1 <= time.isoweekday() <= 5:\n return False\n if time.hour in [6, 7, 8, 18, 19, 20]:\n return True\n\n return False", "def verif_prediction(sequence, event) :\n found=False\n for e in sequence:\n if event[1][1]<e[1]:\n return found\n if e[0]==event[0]:\n if event[1][0]<=e[1]<=event[1][1]:\n found=True\n return found\n return found", "def test_viable(self,outs):\n \n viable = True\n for i,temp_i in enumerate(outs):\n if (temp_i <= self.mins[i+4]):\n viable = False\n elif (temp_i >= self.maxes[i+4]): \n viable = False\n return viable", "def _peakdetect_parabole_fitter(raw_peaks, x_axis, y_axis, points):\n func = lambda x, k, tau, m: k * ((x - tau) ** 2) + m\n fitted_peaks = []\n for peak in raw_peaks:\n index = peak[0]\n x_data = x_axis[index - points // 2: index + points // 2 + 1]\n y_data = y_axis[index - points // 2: index + points // 2 + 1]\n # get a first approximation of tau (peak position in time)\n tau = x_axis[index]\n # get a first approximation of peak amplitude\n m = peak[1]\n \n # build list of approximations\n # k = -m as first approximation?\n p0 = (-m, tau, m)\n popt, pcov = curve_fit(func, x_data, y_data, p0)\n # retrieve tau and m i.e x and y value of peak\n x, y = popt[1:3]\n \n # create a high resolution data set for the fitted waveform\n x2 = np.linspace(x_data[0], x_data[-1], points * 10)\n y2 = func(x2, *popt)\n \n fitted_peaks.append([x, y, [x2, y2]])\n \n return fitted_peaks", "def check_maxdif(data, tables, detrend=True, detrend_kw={'how':'movingmean', 'window':900}):\n from . 
import signal as pmdata\n from matplotlib import pyplot as plt\n\n detrended = pmdata.detrend(data, suffix='', **detrend_kw)\n maxdif = (detrended.max() - detrended.min()).abs()\n valid = tables.loc['dif_limits'] - maxdif\n valid = ~(valid < 0)\n\n return valid", "def inside_exons(i, exons):\n for exon in exons:\n if i in range1(exon[0], exon[1]):\n return(True)\n return(False)", "def find_exceedences(temp, clim):\n exceed_bool = temp - clim[\"thresh\"]\n exceed_bool[exceed_bool <= 0] = False\n exceed_bool[exceed_bool > 0] = True\n\n # Find contiguous regions of exceed_bool = True\n events, n_events = ndimage.label(exceed_bool)\n return events, n_events", "def eeg_peaks(array,tim,onset,plot='false'):\n\tp1_i,n1_i,p2_i = onset+56,onset+104,onset+176\n\twin_p1,win_n1,win_p2 = 15,20,40\n\t# determine P1,N1 and P2 values on the basis of the maximum in GFP in a window around the expected values\n\tidx_p1 = np.logical_and(tim>p1_i-win_p1, tim<p1_i+win_p1)\n\tidx_n1 = np.logical_and(tim>n1_i-win_n1, tim<n1_i+win_n1)\n\tidx_p2 = np.logical_and(tim>p2_i-win_p2, tim<p2_i+win_p2)\n\tp1 = np.max(array[idx_p1])\n\ttp1 = tim[idx_p1][array[idx_p1].argmax()]\n\tn1 = np.min(array[idx_n1])\n\ttn1 = tim[idx_n1][array[idx_n1].argmin()]\n\tp2 = np.max(array[idx_p2])\n\ttp2 = tim[idx_p2][array[idx_p2].argmax()]\n\n\tlineax = dict(linewidth=1, color='black', linestyle='--')\n\tlinep1 = dict(linewidth=1, color='red', linestyle='--')\n\tlinen1 = dict(linewidth=1, color='green', linestyle='--')\n\tlinep2 = dict(linewidth=1, color='blue', linestyle='--')\n\n\tif plot == 'true':\t\t\n\t\tfig = plt.figure(19,figsize=[7,5])\n\t\tax = fig.add_subplot(111, autoscale_on=False, xlim=[onset-100,tp2+200], ylim=[1.25*np.min([p1,n1,p2]),1.25*np.max([p1,n1,p2])])\n\t\tplt.plot(tim,array,'k-',lw=3)\n\t\tplt.plot(tp1,p1,'ro')\n\t\tplt.plot(tn1,n1,'go')\n\t\tplt.plot(tp2,p2,'bo')\n\t\tax.axvline(p1_i-win_p1,**linep1)\n\t\tax.axvline(p1_i+win_p1,**linep1)\n\t\tax.axvline(n1_i-win_n1,**linen1)\n\t\tax.axvline(n1_i+win_n1,**linen1)\n\t\tax.axvline(p2_i-win_p2,**linep2)\n\t\tax.axvline(p2_i+win_p2,**linep2)\n\t\tax.axhline(**lineax)\n\t\tplt.text(tp1-120,1.25*p1,'P1 = %.2f muV at %.0f ms' %(p1,tp1),fontsize=10)\n\t\tplt.text(tn1-40,1.1*n1,'N1 = %.2f muV at %.0f ms' %(n1,tn1),fontsize=10)\n\t\tplt.text(tn1+40,1.1*p2,'P2 = %.2f muV at %.0f ms' %(p2,tp2),fontsize=10)\n\t\tplt.xlabel('time (ms)',fontsize = 13)\n\t\tplt.ylabel('Amplitude',fontsize = 13)\n\treturn [p1,n1,p2,tp1,tn1,tp2]", "def __thresholdInput(self,samples):\n absSamples = np.abs(samples) # 1 ms\n thresh = self.peakThresholdScale*np.mean(absSamples) # 0.2 ms\n i = np.where(absSamples>thresh)[0] # 1e-5 s\n samples[i] = thresh * (samples[i]/absSamples[i]) # 8e-5 s\n # Do it again in case the spikes were really loud\n absSamples[i] = np.abs(samples[i])\n thresh = self.peakThresholdScale*np.mean(absSamples)\n i = np.where(absSamples>thresh)[0]\n self.clippedPeakIPure = i # All peaks that are clipped at first round are clipped again. 
Requires that the peaks in first round are not set to 0\n samples[i] = thresh * (samples[i]/absSamples[i])\n # Mark peaks close to each other\n if len(self.clippedPeakIPure)>0:\n # t = time.time()\n # Mark peaks close to each other as continuous\n diffPeaks = np.diff(self.clippedPeakIPure)\n gapsAll = np.where(diffPeaks>1)[0]\n self.peakMinGap = 100\n gaps = np.where(diffPeaks[gapsAll] < self.peakMinGap)[0] # find gaps smaller than 100\n gapsLen = diffPeaks[gapsAll[gaps]] # length of the gaps\n gapsIdx = gapsAll[gaps] # Index of all gaps\n\n\n # fill the gaps smaller than self.peakMinGap\n pp = np.zeros(self.Nfft,dtype=np.int8)\n pp[self.clippedPeakIPure] = 1\n for i in range(len(gapsLen)):\n pp[self.clippedPeakIPure[gapsIdx[i]]:self.clippedPeakIPure[gapsIdx[i]]+gapsLen[i]] = 1\n\n self.clippedPeakI = np.where(pp==1)[0]\n else:\n self.clippedPeakI = self.clippedPeakIPure.copy()\n if log.level == logging.DEBUG:\n log.debug('clipped peaks ' + str(len(self.clippedPeakIPure)))", "def compatible(self, history):\n return (isinstance(history, CheckpointedMultipleHistory) and\n np.array([self.mapHistory(s, history) is not None \n for s in self.subs]).all())", "def _more_events(klass, series, date_data):\n\n if (series.ends_after\n and len(series.events) >= series.num_occurrences\n or series.ends_on\n and date_data['start_date'] > series.recurrence_end_date):\n return False\n return True", "def _peaktimes(x, prc=95, t_buffer=.01, fs=1000):\n if np.logical_or(prc < 0, prc >= 100):\n raise ValueError('Percentile threshold must be between 0 and 100.')\n\n samp_buffer = np.int(np.round(t_buffer * fs))\n hi = x > np.percentile(x, prc)\n event_intervals = _chunk_time(hi, samp_buffer=samp_buffer)\n E = np.int(np.size(event_intervals) / 2)\n events = np.zeros(E, dtype=object)\n\n for e in range(E):\n temp = x[np.arange(event_intervals[e][0], event_intervals[e][1] + 1)]\n events[e] = event_intervals[e][0] + np.argmax(temp)\n\n return events", "def maxpeaks(sig):\n diff_sig = np.diff(sig)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd+1]<0 and diff_sig[nd]>0)])", "def charged_phis(particlelist, ptmin=0.2, ptmax=2.0, etacut=1.0):\n\n ncharges = 0\n phis_in_event = []\n for particle in particlelist:\n if (particle.charge != 0\n and particle.pt > ptmin and particle.pt < ptmax\n and abs(particle.pseudorap) < etacut):\n\n ncharges += 1\n phis_in_event.append(particle.phi)\n\n return (ncharges, phis_in_event)", "def calc_peak_mags(model_table, filts=[\"u\",\"g\",\"r\",\"i\",\"z\",\"y\",\"J\",\"H\",\"K\"], magidxs=[0,1,2,3,4,5,6,7,8]):\n\n # Initiaize peak mag dictionarts\n model_table_tts = {}\n model_table_mags = {}\n model_table_appmags = {}\n for filt, magidx in zip(filts, magidxs):\n model_table_tts[filt] = []\n model_table_mags[filt] = []\n model_table_appmags[filt] = []\n\n for row in model_table:\n t, lbol, mag = row[\"t\"], row[\"lbol\"], row[\"mag\"]\n for filt, magidx in zip(filts,magidxs):\n idx = np.where(~np.isnan(mag[magidx]))[0]\n if len(idx) == 0:\n model_table_tts[filt].append(np.nan)\n model_table_mags[filt].append(np.nan)\n model_table_appmags[filt].append(np.nan)\n else:\n ii = np.argmin(mag[magidx][idx])\n model_table_tts[filt].append(t[idx][ii])\n model_table_mags[filt].append(mag[magidx][idx][ii])\n model_table_appmags[filt].append(mag[magidx][idx][ii]+5*(np.log10(row[\"dist\"]*1e6) - 1))\n\n for filt, magidx in zip(filts, magidxs):\n model_table[\"peak_tt_%s\"%filt] = model_table_tts[filt]\n model_table[\"peak_mag_%s\"%filt] = model_table_mags[filt] \n 
model_table[\"peak_appmag_%s\"%filt] = model_table_appmags[filt] \n\n return model_table", "def analyze(self, event): \n trgObjects = Collection(event,self.trgColl)\n if self.trgMuMinPt!=None and self.trgMuMinPt>0:\n trgObjIdx = [ idx for idx,trg in enumerate(trgObjects) if getattr(trg,\"pt\")>self.trgMuMinPt and getattr(trg,self.trgBranch)==1]\n \n else:\n trgObjIdx = [ idx for idx,trg in enumerate(trgObjects) if getattr(trg,self.trgBranch)==1]\n \n \n if len(trgObjIdx)==0 and self.skipNoTrgEvt: \n return False\n\n passedPath= [ path for path in self.selectionPathList if getattr(event,path)]\n if len(self.selectionPathList)>0 and len(passedPath)==0:\n if self.skipNoTrgEvt:\n return False\n trgObjIdx=[]\n if len(trgObjIdx)==0:\n for br in self.branches:\n self.out.fillBranch(\"%s_%s\"%(self.outputColl,br),[])\n for col in self.recoColl:\n self.out.fillBranch(\"%s_isTrg\"%(col),0)\n if self.skipProbe or self.skipTag:\n return False\n else:\n Bmu_fired=0\n # print trgObjIdx\n for idx,col in zip(self.recoIdx,self.recoColl):\n out=getattr(event,idx)\n if out in trgObjIdx:\n self.out.fillBranch(\"%s_isTrg\"%(col),1)\n Bmu_fired+=1\n else:\n self.out.fillBranch(\"%s_isTrg\"%(col),0)\n\n if Bmu_fired==0 and self.skipProbe: \n return False \n if Bmu_fired>0 and Bmu_fired==len(trgObjIdx) and self.skipTag:\n return False\n \n for br in self.branches:\n out=[ getattr(trgObjects[idx],br) for idx in trgObjIdx ]\n self.out.fillBranch(\"%s_%s\"%(self.outputColl,br),out)\n return True", "def passed_muon_ID_checks(evt, rec_ndcs_arr, gen_ndcs_arr):\n # Note to self:\n # Using many if, return False statements is a very fast way\n # to leave the function as soon as this event is labeled bad.\n #--- Preselection ---#\n\n # Get the indices of reco muons that pass criteria.\n # good_tightid_ndcs = [ndx for ndx, ID in enumerate(list(evt.lep_tightId)) if ID == 1]\n # good_reliso_ndcs = [ndx for ndx, iso in enumerate(list(evt.lep_RelIso)) if iso < 0.35]\n # if not (good_tightid_ndcs == good_reliso_ndcs):\n # # Not the same muons.\n # return False\n # if not (len(good_tightid_ndcs) == 2):\n # # Need 2 muons per event.\n # return False\n # # if any(x != 1 for x in list(evt.lep_tightId)):\n # # return False\n # # if any(x > 0.35 for x in list(evt.lep_RelIso)):\n # # return False\n\n # rec_ndx_arr = np.array(good_reliso_ndcs) #list(evt.lep_id)\n # gen_ndx_arr = get_ndcs_gen(rec_ndx_arr, list(evt.lep_genindex))\n if not (len(rec_ndcs_arr) == len(gen_ndcs_arr)):\n return False\n rec_id_arr = np.array(evt.lep_id)[rec_ndcs_arr]\n gen_id_arr = np.array(evt.GENlep_id)[gen_ndcs_arr]\n if not all([rec_id == gen_id for rec_id, gen_id in zip(rec_id_arr, gen_id_arr)]):\n return False\n # Reco muons.\n # if len(rec_id_ls) != 2:\n # return False\n if sum(rec_id_arr) != 0: # OSSF.\n return False\n if any(abs(x) != 13 for x in rec_id_arr):\n return False\n # Gen muons.\n # if len(gen_id_ls) != 2:\n # return False\n if sum(gen_id_arr) != 0:\n return False\n if any(abs(x) != 13 for x in gen_id_arr):\n return False\n # Looks like a good event!\n return True\n\n # #--- Below was my first attempt: Pythonic and clever, but slow!\n # id_ls = list(evt.lep_id)\n # # Make list of pass/fail selections (bools):\n # selec_ls = [\n # # Reco muons.\n # len(list(evt.lep_id)) == 2, # 2 muons per event.\n # sum(list(evt.lep_id)) == 0, # OSSF.\n # all(abs(x) == 13 for x in list(evt.lep_id)),\n # # Gen muons.\n # len(list(evt.GENlep_id)) == 2, # 2 muons per event.\n # sum(list(evt.GENlep_id)) == 0, # OSSF.\n # all(abs(x) == 13 for x in 
list(evt.GENlep_id)),\n # # Additional cuts.\n # all(x == 1 for x in list(evt.lep_tightId)),\n # all(x < 0.35 for x in list(evt.lep_RelIso)),\n # ]\n # return all(selec_ls)\n # #--- Above is clever, but slow!" ]
[ "0.6195646", "0.607271", "0.5963362", "0.5910339", "0.5904927", "0.5832219", "0.57945627", "0.5773653", "0.563543", "0.5589451", "0.5549814", "0.553361", "0.55285126", "0.5508843", "0.5495379", "0.54413253", "0.54157656", "0.5363649", "0.532954", "0.5318515", "0.53176546", "0.5313904", "0.5270746", "0.5267198", "0.5257355", "0.524623", "0.5244575", "0.52402085", "0.5220869", "0.52094924", "0.5198512", "0.5193909", "0.5174969", "0.5173785", "0.5170119", "0.5169989", "0.515622", "0.5152927", "0.51512754", "0.5145629", "0.51347965", "0.512816", "0.5122943", "0.5118895", "0.5116673", "0.511368", "0.51100814", "0.5097393", "0.50875944", "0.5083681", "0.50756943", "0.5071364", "0.50600255", "0.50554204", "0.5053425", "0.50344044", "0.5028635", "0.5026937", "0.5025334", "0.5017962", "0.5016253", "0.50153446", "0.50096196", "0.49998543", "0.49933067", "0.49927652", "0.4987708", "0.4986989", "0.498688", "0.49819478", "0.49790674", "0.4978688", "0.49774545", "0.49741656", "0.49719405", "0.49714914", "0.49651626", "0.4960841", "0.4959927", "0.4958056", "0.4955079", "0.49522808", "0.49513644", "0.49476692", "0.49464235", "0.49394622", "0.49384204", "0.4937721", "0.49374098", "0.4936012", "0.49352098", "0.49348092", "0.49299586", "0.49279165", "0.49253705", "0.49200758", "0.4919118", "0.49182755", "0.49136987", "0.4907643" ]
0.82217896
0
Takes in lists of start times and end times to create a list of time windows, plus a list indicating whether or not each event is a subevent, and uses those lists to run functions that extract data from the GOES database. Each list must have the same length, and indices of the lists must correspond (i.e., start_time[j] has an end time of end_time[j] and its subevent boolean is subevent_bools[j]). Not to be confused with multi_events, which generates output given multiple events within one time window.
def many_events(start_time,end_time,subevent_bools): #running through for each event for j in range(len(start_time)): #start, end, and subevent bool for this event st = start_time[j] et = end_time[j] subevent = bool(subevent_bools[j]) #checking if start time is actually available if str(st) != 'nan': try: st = parse(st) yes_st = True except ValueError: yes_st = False else: yes_st = False #checking if end time is actually available if str(et) != 'nan': try: et = parse(et) yes_et = True except ValueError: yes_et = False else: yes_et = False #if both start and end times are available, running the code if yes_st and yes_et: #event must be after Nov. 2010 because currently no capability for #instruments in use before then - change this if you have that #capability if st > datetime(2010,9,1): try: print('got start and end times! running database extraction') database_extraction(st,et,instrument_chosen,subevent) except: continue else: print('cannot run for events before November 2010 because do not have ' 'access to instruments before then')
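A minimal usage sketch for many_events, assuming the surrounding module already defines database_extraction and the global instrument_chosen, and that parse (from dateutil) and datetime are imported as the function expects; the event dates and the float('nan') placeholder below are illustrative values only, not real catalog entries.

# Three events; index j in each list refers to the same event.
start_times    = ['2012-01-23 03:38:00', '2012-03-07 01:05:00', '2012-05-17 01:25:00']
end_times      = ['2012-01-24 12:00:00', '2012-03-08 14:00:00', float('nan')]
subevent_bools = [False,                 False,                 True]

# The first two events are passed to database_extraction one at a time;
# the third is skipped because its end time is missing ('nan').
many_events(start_times, end_times, subevent_bools)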
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multi_event(st,et,instrument_chosen,subevent):\r\n print('checking for multiple events within given time window')\r\n \r\n #creating file for time window with first events for all thresholds\r\n out_name = Path(cfg.obs_path) / database_extraction(st,et,instrument_chosen,subevent)\r\n\r\n #creating files for all second events for all thresholds\r\n new_files = two_in_one(out_name,et,subevent)\r\n \r\n #creating files for any third events for all thresholds that had a second event\r\n for file in new_files:\r\n two_in_one(file,et,subevent) \r\n \r\n return", "def two_in_one(obs_file,et,subevent):\r\n \r\n #in this function, the \"original time window\" talked about in the comments\r\n #refers to the start and end times that were input to create the file obs_file,\r\n #which will likely have been created using the database_extraction function\r\n \r\n #opening first output file created by operational_sep_quantities\r\n with open(obs_file, 'r') as o:\r\n out = js.load(o)\r\n \r\n #all events recorded in that output file\r\n ongoing_events = (out['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'])\r\n \r\n #creating lists for values from each event\r\n end_times = [] \r\n start_times = []\r\n energy_thresholds = []\r\n flux_thresholds = []\r\n out_names = []\r\n \r\n #appending values to lists for each event\r\n for i in range(len(ongoing_events)):\r\n start_times.append(parse(ongoing_events[i]['start_time']))\r\n end_times.append(parse(ongoing_events[i]['end_time']))\r\n energy_thresholds.append(ongoing_events[i]['energy_min'])\r\n flux_thresholds.append(float(ongoing_events[i]['threshold']))\r\n \r\n #checking if there was a second event for each threshold\r\n for i in range(len(end_times)):\r\n end = end_times[i]\r\n #if the end time of an event for any threshold was a day before the last day\r\n #in the original time window given, will check if ONLY THAT THRESHOLD\r\n #had another event after the first one, using the end time of the first\r\n #event of that threshold as the new start time of the event window\r\n if end.date() < et.date():\r\n print('end time to use as new start time: %s' %end)\r\n #figuring out which threshold this end time was for\r\n flux_thresh = int(flux_thresholds[i])\r\n energy_thresh = int(energy_thresholds[i])\r\n print('extracting second event for threshold ' + str(flux_thresh) + ' MeV '\r\n + str(energy_thresh) + ' pfu')\r\n #new start time (2 days in advance bc the database_extraction function\r\n #makes the start time 2 days prior, so will cancel that out)\r\n st = end + timedelta(days=2)\r\n #thresholds in correct format\r\n thresholds = str(energy_thresh) + ',' + str(flux_thresh)\r\n print('thresholds: %s' %thresholds)\r\n #creating observation data for second event for thresholds given\r\n out_names.append(Path(cfg.obs_path) /\r\n database_extraction(st,et,instrument_chosen,subevent,\r\n thresholds = thresholds,\r\n one_thresh = True))\r\n \r\n #returns list of all new files created by this function\r\n return(out_names)", "def _split_events_per_trial(t_idx, codes: np.ndarray, times: np.ndarray, params: dict) -> dict:\n codes, times = _check_input(codes, times)\n trial_to_condition_func = eval(params['trial_to_condition_func'], {}, {})\n cnd_number = np.int16(trial_to_condition_func(codes, t_idx))\n assert np.isscalar(cnd_number)\n\n start_times, end_times = _get_times_subtrials(codes, times, params['subtrials'])\n trial_start_time, trial_end_time = _get_times_trial(codes, times, start_times, end_times, params)\n\n # 
this is due to ill design of trial window and subtrial window due to human error.\n assert np.all(trial_start_time <= start_times)\n assert np.all(trial_end_time >= end_times)\n\n event_code_idx = np.logical_and(times >= trial_start_time, times <= trial_end_time)\n\n return {\n 'start_times': start_times, # absolute\n 'end_times': end_times, # absolute\n 'trial_start_time': trial_start_time,\n 'trial_end_time': trial_end_time,\n 'event_times': times[event_code_idx],\n 'event_codes': codes[event_code_idx],\n 'condition_number': cnd_number\n }", "def compile_chrono_events(\n test_scenario: SimulationTestScenario, setup_events: List[SimulationEvent]\n) -> Tuple[List[SimulationEvent], Tuple[str, datetime]]:\n previous_subtest_last_event_ts = datetime.min.replace(tzinfo=timezone.utc)\n previous_subtest_last_assertion_ts = datetime.min.replace(tzinfo=timezone.utc)\n current_subtest_first_event_ts = datetime.max.replace(tzinfo=timezone.utc)\n current_subtest_first_assertion_ts = datetime.max.replace(tzinfo=timezone.utc)\n assertion_ts = []\n events = []\n derived_param_outputs = []\n\n for sub_test in test_scenario.sub_tests:\n if sub_test.events:\n current_subtest_first_event_ts = sub_test.events[0].time\n\n if current_subtest_first_event_ts < previous_subtest_last_event_ts:\n log.warning(\n f'Subtest \"{sub_test.description}\" contains '\n \"event timestamp before the previous one.\"\n )\n\n previous_subtest_last_event_ts = sub_test.events[-1].time\n events.extend(sub_test.events)\n\n if sub_test.expected_balances_at_ts:\n assertion_ts.extend(sub_test.expected_balances_at_ts.keys())\n\n if sub_test.expected_posting_rejections:\n assertion_ts.extend(\n expected_rejection.timestamp\n for expected_rejection in sub_test.expected_posting_rejections\n )\n if sub_test.expected_schedules:\n assertion_ts.extend(\n runtime\n for expected_schedule in sub_test.expected_schedules\n for runtime in expected_schedule.run_times\n )\n if sub_test.expected_workflows:\n assertion_ts.extend(\n runtime\n for expected_workflow in sub_test.expected_workflows\n for runtime in expected_workflow.run_times\n )\n\n if sub_test.expected_derived_parameters:\n for expected_derived_param in sub_test.expected_derived_parameters:\n assertion_ts.append(expected_derived_param.timestamp)\n derived_param_outputs.append(\n (\n expected_derived_param.account_id,\n expected_derived_param.timestamp,\n )\n )\n\n if assertion_ts:\n sorted_assertion_ts = sorted(assertion_ts)\n current_subtest_first_assertion_ts = sorted_assertion_ts[0]\n\n if current_subtest_first_assertion_ts < previous_subtest_last_assertion_ts:\n log.warning(\n f'Subtest \"{sub_test.description}\" contains '\n \"assertion timestamp before the previous one.\"\n )\n\n previous_subtest_last_assertion_ts = sorted_assertion_ts[-1]\n assertion_ts.clear()\n\n if (\n previous_subtest_last_event_ts > test_scenario.end\n or previous_subtest_last_assertion_ts > test_scenario.end\n ):\n log.warning(\"last assertion or event happens outside of simulation window\")\n\n if setup_events and events and setup_events[-1].time > events[0].time:\n raise ValueError(\n f\"First custom event at {events[0].time}, it needs to be after \"\n f\"{setup_events[-1].time}, when account and plan setup events are complete\"\n )\n\n return setup_events + events, derived_param_outputs", "def main():\n\n f = open(eventsfile, 'r')\n lines = f.readlines()\n numcounter = 0\n counter = 0\n fullcounter = 0\n movielist = []\n movielists =[]\n timestamp_list = []\n filteredlist = [] \n startdate = 
\"2020-02-26\"\n \n for line in lines:\n TAPES = line.split('\\t')\n if int(TAPES[2]) == 1 or int(TAPES[2]) == 2:\n filteredlist.append(line)\n \n for newline in filteredlist:\n TAPES = newline.split('\\t')\n fullcounter +=1\n if int(TAPES[2]) == 2:\n timestamp_list.append(0)\n continue\n startdate2 = startdate.split(\"-\")[1] + \"/\" + startdate.split(\"-\")[2] + \"/\" + startdate.split(\"-\")[0]\n dateplustime = startdate2 + TAPES[0][0:len(TAPES[0])]\n thistime = faststrptime(dateplustime)\n unixtimestamp = datetime.datetime.timestamp(thistime)\n timestamp_list.append(int(unixtimestamp))\n\n i = 0 \n for element in timestamp_list:\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+(counter-i)]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n \n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue \n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n if i < (len(timestamp_list)-1) and timestamp_list[i+1]-timestamp_list[i] >= 3600:\n counter += 1\n i = counter\n movielist.append(counter)\n\n if len(movielist) <= 15:\n numcounter = 0\n j = 0\n for step in movielist:\n movielists[len(movielists)-1].append(movielist[j])\n j += 1\n movielist = []\n continue\n else:\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n continue\n\n counter += 1\n numcounter += 1\n if element != 0:\n movielist.append(counter)\n i += 1\n \n if numcounter == 30:\n numcounter = 0\n movielists.append(movielist)\n movielist = []\n \n if i > (len(timestamp_list)-1):\n movielists.append(movielist)\n movielist = []\n numcounter = 0\n \n numendlists = counter - fullcounter\n first = len(movielists)-numendlists\n last = len(movielists)\n del movielists[first:last]\n \n for x in movielists:\n for y in x:\n if int(filenumber) == y:\n movielist = x\n\n modename = str(movielist[0]) + \"to\" + str(movielist[len(movielist)-1])\n modefilename = \"mode_\" + modename + \".png\"\n try:\n imread(modefilename)\n except:\n imageMode(modename,movielist)\n\n e = loadmodeImage(modefilename)\n \n roimask = np.zeros((ydim,xdim))\n f = open(roisfile, 'r')\n lines = f.readlines()\n i = 1\n i2 = 0\n for line in lines:\n try:\n print(int(line.split(' ')[0]))\n except ValueError:\n i2 += 1\n continue\n minx = int(line.split(' ')[0])\n miny = int(line.split(' ')[1])\n maxx = int(line.split(' ')[2])\n maxy = int(line.split(' ')[3])\n roimask[int(miny):int(maxy),int(minx):int(maxx)] = i\n i += 1\n numberofwells = i-1\n numberofcols = int(i2/2)\n numberofrows = int(numberofwells/numberofcols)\n roimaskweights = convertMaskToWeights(roimask)\n\n cap = cv2.VideoCapture(videoStream)\n\n cap.set(3,roimask.shape[1])\n cap.set(4,roimask.shape[0])\n \n ret,frame = cap.read()\n storedImage = np.array(e * 255, dtype = np.uint8)\n storedMode = Blur(storedImage)\n storedFrame = grayBlur(frame)\n cenData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))*2 -2])\n pixData = np.zeros([ int(saveFreq), len(np.unique(roimaskweights))])\n i = 0;\n totalFrames = 0\n while(cap.isOpened()):\n ret,frame = cap.read()\n if ret == False:\n break\n currentFrame = grayBlur(frame)\n diffpix = diffImage(storedFrame,currentFrame,pixThreshold)\n diff = trackdiffImage(storedMode,currentFrame,pixThreshold)\n diff.dtype = np.uint8\n contours,hierarchy = cv2.findContours(diff, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n MIN_THRESH = 20.0\n MIN_THRESH_P = 20.0\n roi_dict = {}\n for 
r in range(0,numberofwells):\n roi_dict[r+1] = []\n for cs in range(0,len(contours)):\n if cv2.contourArea(contours[cs]) < 1.0:\n continue\n if cv2.arcLength(contours[cs],True) < 1.0:\n continue\n if cv2.contourArea(contours[cs]) > MIN_THRESH or cv2.arcLength(contours[cs],True) > MIN_THRESH_P:\n M = cv2.moments(contours[cs])\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n area = cv2.contourArea(contours[cs])\n perim = cv2.arcLength(contours[cs],True)\n if int(roimask[cY,cX]) == 0:\n continue\n if not roi_dict[int(roimask[cY,cX])]:\n roi_dict[int(roimask[cY,cX])].append((area*perim,cX,cY))\n else:\n if roi_dict[int(roimask[cY,cX])][0][0] < area*perim:\n roi_dict[int(roimask[cY,cX])][0] = (area*perim,cX,cY)\n\n pixcounts = []\n pixcounts = np.bincount(roimaskweights, weights=diffpix.ravel())\n pixData[i,:] = np.hstack((pixcounts))\n counts = []\n keys = roi_dict.keys()\n keys = sorted(keys)\n for k in keys:\n x = -10000\n y = -10000\n if roi_dict[k]:\n x = roi_dict[k][0][1]\n y = roi_dict[k][0][2]\n counts.append(x)\n counts.append(y)\n cv2.line(storedImage,(x,y),(x,y),(255,255,255),2)\n if i == 284:\n cv2.imwrite(videoStream + '_trackedimagewithlines_' + str(i) + \".png\", storedImage)\n cenData[i,:] = np.asarray(counts)\n totalFrames += 1\n storedFrame = currentFrame\n i += 1\n\n file = open(videoStream + \".centroid2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells*2):\n file.write(str(int(cenData[x,:][y])) + '\\n')\n pixData = pixData[:i,:]\n pixData = pixData[:,1:] \n file = open(videoStream + \".motion2\",'w')\n for x in range(0,frameRate):\n for y in range(0,numberofwells):\n file.write(str(int(pixData[x,:][y])) + '\\n')\n\n cap.release()\n cv2.destroyAllWindows()\n \n try:\n image = Image.open('lastframe.png')\n except:\n makenumROIsimage()", "def events(time):\n\n event_list = eventlist()\n idx = np.all(time == event_list[:, 0:len(time)], axis=1)\n return event_list[idx,:]", "def _locate_events(self, start_time, end_time):\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n # Adjust pre- and post-pad to take into account cosine taper\n t_length = self.pre_pad + 4*self.marginal_window + self.post_pad\n self.pre_pad += np.ceil(t_length * 0.06)\n self.post_pad += np.ceil(t_length * 0.06)\n\n trig_events = self.output.read_triggered_events(start_time, end_time)\n n_evts = len(trig_events)\n\n for i, trig_event in trig_events.iterrows():\n event_uid = trig_event[\"EventID\"]\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tEVENT - {} of {} - {}\\n\"\n msg += \"=\" * 120 + \"\\n\\n\"\n msg += \"\\tDetermining event location...\\n\"\n msg = msg.format(i + 1, n_evts, event_uid)\n self.output.log(msg, self.log)\n\n w_beg = trig_event[\"CoaTime\"] - 2*self.marginal_window \\\n - self.pre_pad\n w_end = trig_event[\"CoaTime\"] + 2*self.marginal_window \\\n + self.post_pad\n\n timer = util.Stopwatch()\n self.output.log(\"\\tReading waveform data...\", self.log)\n try:\n self._read_event_waveform_data(trig_event, w_beg, w_end)\n except util.ArchiveEmptyException:\n msg = \"\\tNo files found in archive for this time period\"\n self.output.log(msg, self.log)\n continue\n except util.DataGapException:\n msg = \"\\tAll available data for this time period contains gaps\"\n msg += \"\\n\\tOR data not available at start/end of time period\\n\"\n self.output.log(msg, self.log)\n continue\n 
self.output.log(timer(), self.log)\n\n timer = util.Stopwatch()\n self.output.log(\"\\tComputing 4D coalescence grid...\", self.log)\n\n daten, max_coa, max_coa_norm, loc, map_4d = self._compute(\n w_beg, w_end,\n self.data.signal,\n self.data.availability)\n coord = self.lut.xyz2coord(np.array(loc).astype(int))\n event_coa_data = pd.DataFrame(np.array((daten, max_coa,\n coord[:, 0],\n coord[:, 1],\n coord[:, 2])).transpose(),\n columns=[\"DT\", \"COA\", \"X\", \"Y\", \"Z\"])\n event_coa_data[\"DT\"] = event_coa_data[\"DT\"].apply(UTCDateTime)\n event_coa_data_dtmax = \\\n event_coa_data[\"DT\"].iloc[event_coa_data[\"COA\"].astype(\"float\").idxmax()]\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n\n if (event_coa_data_dtmax >= trig_event[\"CoaTime\"]\n - self.marginal_window) \\\n and (event_coa_data_dtmax <= trig_event[\"CoaTime\"]\n + self.marginal_window):\n w_beg_mw = event_coa_data_dtmax - self.marginal_window\n w_end_mw = event_coa_data_dtmax + self.marginal_window\n else:\n msg = \"\\n\\tEvent {} is outside marginal window.\\n\"\n msg += \"\\tDefine more realistic error - the marginal window\"\n msg += \" should be an estimate of the origin time uncertainty,\"\n msg += \"\\n\\tdetermined by the expected spatial uncertainty and\"\n msg += \"the seismic velocity in the region of the earthquake\\n\"\n msg += \"\\n\" + \"=\" * 120 + \"\\n\"\n msg = msg.format(event_uid)\n self.output.log(msg, self.log)\n continue\n\n event_mw_data = event_coa_data\n event_mw_data = event_mw_data[(event_mw_data[\"DT\"] >= w_beg_mw) &\n (event_mw_data[\"DT\"] <= w_end_mw)]\n map_4d = map_4d[:, :, :,\n event_mw_data.index[0]:event_mw_data.index[-1]]\n event_mw_data = event_mw_data.reset_index(drop=True)\n event_max_coa = event_mw_data.iloc[event_mw_data[\"COA\"].astype(\"float\").idxmax()]\n\n # Update event UID; make out_str\n event_uid = str(event_max_coa.values[0])\n for char_ in [\"-\", \":\", \".\", \" \", \"Z\", \"T\"]:\n event_uid = event_uid.replace(char_, \"\")\n out_str = \"{}_{}\".format(self.output.name, event_uid)\n self.output.log(timer(), self.log)\n\n # Make phase picks\n timer = util.Stopwatch()\n self.output.log(\"\\tMaking phase picks...\", self.log)\n phase_picks = self._phase_picker(event_max_coa)\n self.output.write_picks(phase_picks[\"Pick\"], event_uid)\n self.output.log(timer(), self.log)\n\n # Determining earthquake location error\n timer = util.Stopwatch()\n self.output.log(\"\\tDetermining earthquake location and uncertainty...\", self.log)\n loc_spline, loc_gau, loc_gau_err, loc_cov, \\\n loc_cov_err = self._calculate_location(map_4d)\n self.output.log(timer(), self.log)\n\n # Make event dictionary with all final event location data\n event = pd.DataFrame([[event_max_coa.values[0],\n event_max_coa.values[1],\n loc_spline[0], loc_spline[1], loc_spline[2],\n loc_gau[0], loc_gau[1], loc_gau[2],\n loc_gau_err[0], loc_gau_err[1],\n loc_gau_err[2],\n loc_cov[0], loc_cov[1], loc_cov[2],\n loc_cov_err[0], loc_cov_err[1],\n loc_cov_err[2]]],\n columns=self.EVENT_FILE_COLS)\n\n self.output.write_event(event, event_uid)\n\n self._optional_locate_outputs(event_mw_data, event, out_str,\n phase_picks, event_uid, map_4d)\n\n self.output.log(\"=\" * 120 + \"\\n\", self.log)\n\n del map_4d, event_coa_data, event_mw_data, event_max_coa, \\\n phase_picks\n self.coa_map = None", "def parseEvents(data, times, eventTimes):\n striped = []\n remaining = range(len(times))\n stripedEvents = []\n\n for t in eventTimes:\n tmpEvent = 
t.date()\n for j in range(len(times)):\n tmpTime = times[j].date()\n\n if tmpEvent == tmpTime:\n striped.append(tmpEvent)\n stripedEvents.append(data[j, :])\n remaining.remove(j)\n break\n\n stripedEvents = np.array(stripedEvents)\n remainingTimes = np.array(remaining)\n stripedTimes = np.array(striped)\n remainingEvents = data[remaining]\n\n return stripedTimes, remainingTimes, stripedEvents, remainingEvents", "def get_events(raw,event_id,offset=0):\r\n # extract time stamps from annotations\r\n timestamps = np.round(raw._annotations.onset*raw.info['sfreq']+offset).astype(int)\r\n assert np.all(timestamps < raw.n_times), \"offset overflow total data length\"\r\n\r\n # get labels\r\n labels = raw._annotations.description\r\n labels = np.vectorize(event_id.__getitem__)(labels) #convert labels into int\r\n \r\n # build event matrix\r\n events = np.concatenate((timestamps.reshape(-1,1),\r\n np.zeros(timestamps.shape).astype(int).reshape(-1,1),\r\n labels.reshape(-1,1)),axis=1)\r\n \r\n # the difference between two full stimuli windows should be 7 sec. \r\n events = events[events[:, 2] < 100, :] #keep only events and remove annotations\r\n\r\n assert np.unique(events[:, 2]).size ==1 #TODO: make it works for different events\r\n \r\n stimt = np.append(events[:, 0], raw.n_times) #stim interval\r\n epochs2keep = np.where(np.diff(stimt) == raw.info['sfreq']*7)[0] #TODO: keep only epoch of 7sec (make it an argument)\r\n epochs2drop = np.where(np.diff(stimt) != raw.info['sfreq']*7)[0] #drop the rest\r\n\r\n return events, epochs2keep, epochs2drop", "def event_overlap(labels, half, timestamp, window):\n\n for l, _ in labels:\n if l[0] == half:\n ceil = l[1] + window//2\n floor = l[1] - window//2\n if timestamp <= ceil and timestamp >= floor:\n return True\n return False", "def test_overlap():\n events = [['Event', '2017-11-21T10:00:00-08:00', '2017-11-21T11:00:00-08:00'],\n ['Event', '2017-11-21T10:30:00-08:00', '2017-11-21T11:20:00-08:00']]\n freetimes, _ = free(events, 9, 0, 17, 0, day_range, 30)\n fmt_freetime = output_format(freetimes)\n for i in fmt_freetime:\n print(i)\n assert fmt_freetime == ['Tue, Nov 21, 9:00 am to Tue, Nov 21, 10:00 am.',\n 'Tue, Nov 21, 11:20 am to Tue, Nov 21, 5:00 pm.',\n 'Wed, Nov 22, 9:00 am to Wed, Nov 22, 5:00 pm.',\n 'Thu, Nov 23, 9:00 am to Thu, Nov 23, 5:00 pm.',\n 'Fri, Nov 24, 9:00 am to Fri, Nov 24, 5:00 pm.',\n 'Sat, Nov 25, 9:00 am to Sat, Nov 25, 5:00 pm.',\n 'Sun, Nov 26, 9:00 am to Sun, Nov 26, 5:00 pm.',\n 'Mon, Nov 27, 9:00 am to Mon, Nov 27, 5:00 pm.']", "def UTC_times(times, \n trace, \n diff_thres = 30.0):\n # set times values to seconds\n \n #AUTOMATE THIS SECTION!\n #CHECK THAT THIS IS CORRECT\n times = times / trace.stats.sampling_rate\n #remove unwanted parts of times numpy array \n times = times[:,0]\n \n #remove the first instance of time because it is \n #somehow always of the wrong format!\n #times = np.delete(times, 0) \n \n event_times = []\n event = [times[0]]\n \n start_time = trace.stats.starttime\n \n #for item in times:\n # print start_time + item\n\n for i in range(1, len(times)):\n \n # check if two events in times array have a difference < diff_thres, \n #if not, run average of those times, if so append that events to a \n #new events_times list\n \n #time_diff = times[i + 1] - times[i]\n \n time_diff = times[i] - times[i-1]\n\n #save info until events are far enough apart! \n if time_diff < diff_thres:\n\n event.append(times[i])\n \n \n #raise conditional for if events are far enough apart! 
\n else:\n\n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n\n event_times.append([event_start, event_end])\n \n event = [] \n \n event.append(times[i])\n\n #if event still contains something for any reason, add it to event times\n if len(event) > 0: \n event_start = event[0] - 2 #minus 5 seconds\n event_end = max(event) + 2 #add 5 seconds\n event_times.append([event_start, event_end])\n event = [] \n \n\n\n #if len(event_times) == 0 and len(event) > 0 or time_diff > diff_thres and len(event) > 0:\n \n #event_times.append(sum(event) / len(event))\n \n # event_start = event[0] - 2 #minus 5 seconds\n # event_end = event[-1] + 2 #add 5 seconds\n \n # event_times.append([event_start, event_end])\n \n # event = []\n \n #event_times.append(times[i])\n \n # else:\n # event.append(times[i])\n \n\n UTC_events = []\n\n #earthquake length threshold is 10 seconds and above!\n eq_len = 0#5.0\n\n for i in event_times:\n estart = start_time + i[0]\n eend = start_time + i[1]\n \n if eend - estart > eq_len:\n UTC_events.append([estart, eend])\n \n #UTC_events = np.unique(np.asarray(UTC_events))\n\n \n return UTC_events", "def overlap_events(event1, event2, place1, place2, log_places):\n place1.start_event(event1)\n log_conflicts(event1.start_time, log_places)\n place2.start_event(event2)\n log_conflicts(event2.start_time, log_places)\n place1.end_event(event1)\n log_conflicts(event1.end_time, log_places)\n place2.end_event(event2)\n log_conflicts(event2.end_time, log_places)", "def time(self,orid_time,window=5):\n #{{{ Function to get possible matches of events for some epoch time.\n\n results = {}\n\n #\n # If running in simple mode we don't have access to the tables we need\n #\n if config.simple:\n return results\n\n orid_time = _isNumber(orid_time)\n\n if not orid_time:\n print \"Not a valid number in function call: %s\" % orid_time\n return\n \n start = float(orid_time)-float(window)\n end = float(orid_time)+float(window)\n\n dbname = self.dbcentral(orid_time)\n\n if not db:\n print \"No match for orid_time in dbcentral object: (%s,%s)\" % (orid_time,self.dbcentral(orid_time))\n return\n\n try: \n db = datascope.dbopen( dbname , 'r' )\n db.lookup( table='origin')\n db.query(datascope.dbTABLE_PRESENT) \n except Exception,e:\n print \"Exception on Events() time(%s): Error on db pointer %s [%s]\" % (orid_time,db,e)\n return\n\n db.subset( 'time >= %f' % start )\n db.subset( 'time <= %f' % end )\n\n try:\n db = datascope.dbopen( dbname , 'r' )\n db.lookup( table='wfdisc' )\n records = db.query(datascope.dbRECORD_COUNT)\n\n except:\n records = 0\n\n if records:\n\n for i in range(records):\n\n db.record = i\n\n (orid,time) = db.getv('orid','time')\n\n orid = _isNumber(orid)\n time = _isNumber(time)\n results[orid] = time\n\n return results", "def build_timings(events):\n\n stack = []\n timings = []\n for e in events:\n if e.type == 'START':\n stack.append(e)\n elif e.type == 'FINISH':\n prev = stack.pop()\n if prev.step != e.step:\n raise Exception(\n \"\"\"I have a FINISH event for the START event of a\n different step\"\"\")\n yield Proc(e.step, prev.timestamp, e.timestamp, e.job)", "def update_events_start_stop(self):\n\n # stateEventsList = [self.pj[ETHOGRAM][x][BEHAVIOR_CODE] for x in self.pj[ETHOGRAM] if\n # STATE in self.pj[ETHOGRAM][x][TYPE].upper()]\n\n for row in range(0, self.twEvents.rowCount()):\n\n t = self.twEvents.item(row, tw_obs_fields[\"Tempo\"]).text()\n\n if \":\" in t:\n time = time2seconds(t)\n else:\n time = Decimal(t)\n\n subject = 
self.twEvents.item(row, tw_obs_fields[\"Sujeito\"]).text()\n key = self.twEvents.item(row, tw_obs_fields[\"Chave\"]).text()\n modifier = self.twEvents.item(row, tw_obs_fields[\"Modificador\"]).text()\n\n # check if code is state\n nbEvents = len(\n [event[EVENT_BEHAVIOR_FIELD_IDX] for event in self.pj[OBSERVATIONS][self.observationId][EVENTS]\n if event[EVENT_BEHAVIOR_FIELD_IDX] == key\n and event[EVENT_TIME_FIELD_IDX] < time\n and event[EVENT_SUBJECT_FIELD_IDX] == subject\n and event[EVENT_MODIFIER_FIELD_IDX] == modifier])\n\n # if nbEvents and (nbEvents % 2): # test >0 and odd\n # self.twEvents.item(row, tw_obs_fields[TYPE]).setText(STOP)\n # else:\n # self.twEvents.item(row, tw_obs_fields[TYPE]).setText(START)", "def run_event_outside(self):\n QMessageBox.warning(self, programName, \"Function not yet implemented\")\n return\n\n if not self.observationId:\n self.no_observation()\n return\n\n if self.twEvents.selectedItems():\n row_s = self.twEvents.selectedItems()[0].row()\n row_e = self.twEvents.selectedItems()[-1].row()\n eventtime_s = self.pj[OBSERVATIONS][self.observationId][EVENTS][row_s][0]\n eventtime_e = self.pj[OBSERVATIONS][self.observationId][EVENTS][row_e][0]\n\n durations = [] # in seconds\n\n # TODO: check for 2nd player\n for mediaFile in self.pj[OBSERVATIONS][self.observationId][FILE][PLAYER1]:\n durations.append(self.pj[OBSERVATIONS][self.observationId][\"media_info\"][\"length\"][mediaFile])\n\n mediaFileIdx_s = [idx1 for idx1, x in enumerate(durations) if eventtime_s >= sum(durations[0:idx1])][-1]\n media_path_s = self.pj[OBSERVATIONS][self.observationId][FILE][PLAYER1][mediaFileIdx_s]\n\n mediaFileIdx_e = [idx1 for idx1, x in enumerate(durations) if eventtime_e >= sum(durations[0:idx1])][-1]\n media_path_e = self.pj[OBSERVATIONS][self.observationId][FILE][PLAYER1][mediaFileIdx_e]\n\n # calculate time for current media file in case of many queued media files\n\n print(mediaFileIdx_s)\n print(type(eventtime_s))\n print(durations)\n\n eventtime_onmedia_s = round(eventtime_s - float2decimal(sum(durations[0:mediaFileIdx_s])), 3)\n eventtime_onmedia_e = round(eventtime_e - float2decimal(sum(durations[0:mediaFileIdx_e])), 3)\n\n print(row_s, media_path_s, eventtime_s, eventtime_onmedia_s)\n print(self.pj[OBSERVATIONS][self.observationId][EVENTS][row_s])\n\n print(row_e, media_path_e, eventtime_e, eventtime_onmedia_e)\n print(self.pj[OBSERVATIONS][self.observationId][EVENTS][row_e])\n\n if media_path_s != media_path_e:\n print(\"events are located on 2 different media files\")\n return\n\n media_path = media_path_s\n\n # example of external command defined in environment:\n # export eMOCEXTERNAL=\"myprog -i {MEDIA_PATH} -s {START_S} -e {END_S} {DURATION_MS} --other\"\n\n if \"eMOCEXTERNAL\" in os.environ:\n external_command_template = os.environ[\"eMOCEXTERNAL\"]\n else:\n print(\"eMOCEXTERNAL env var not defined\")\n return\n\n external_command = external_command_template.format(OBS_ID=self.observationId,\n MEDIA_PATH='\"{}\"'.format(media_path),\n MEDIA_BASENAME='\"{}\"'.format(\n os.path.basename(media_path)),\n START_S=eventtime_onmedia_s,\n END_S=eventtime_onmedia_e,\n START_MS=eventtime_onmedia_s * 1000,\n END_MS=eventtime_onmedia_e * 1000,\n DURATION_S=eventtime_onmedia_e - eventtime_onmedia_s,\n DURATION_MS=(\n eventtime_onmedia_e - eventtime_onmedia_s) * 1000)\n\n print(external_command)\n '''\n p = subprocess.Popen(external_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n '''\n\n '''\n if eventtimeS == eventtimeE:\n q = []\n else:\n durationsec 
= eventtimeE-eventtimeS\n q = [\"--durationmsec\",str(int(durationsec*1000))]\n args = [ex, \"-f\",os.path.abspath(fn),\"--seekmsec\",str(int(eventtimeS*1000)),*q,*(\"--size 1 --track 1 --redetect 100\").split(\" \")]\n if os.path.split(fn)[1].split(\"_\")[0] in set([\"A1\",\"A2\",\"A3\",\"A4\",\"A5\",\"A6\",\"A7\",\"A8\",\"A9\",\"A10\"]):\n args.append(\"--flip\")\n args.append(\"2\")\n print (os.path.split(fn)[1].split(\"_\")[0])\n print (\"running\",ex,\"with\",args,\"in\",os.path.split(ex)[0])\n #pid = subprocess.Popen(args,executable=ex,cwd=os.path.split(ex)[0])\n '''\n\n # Extract Information:\n # videoname of current observation\n # timeinterval\n # custom execution", "def triggers(rate, volume, uptime, start_mjd, end_mjd, episodes):\n episode_events = []\n for episode in range(episodes):\n events = episode_triggers(rate, volume, uptime, start_mjd, end_mjd)\n events['episode'] = episode\n episode_events.append(\n events[['episode', 'event_id', 'mjd', 'ra', 'decl']])\n\n events = pd.concat(episode_events, axis=0)\n\n return events", "def sample_times():\n\tthe_times = []\n\tday = config.window_start_date\n\twhile day <= config.window_end_date:\n\t\t# times from start of window on day to end of window \n\t\ttime = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_start_time \n\t\t) )\n\t\tend_time = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_end_time \n\t\t) )\n\t\twhile time < end_time: # While still in the time window\n\t\t\tthe_times.append( time )\n\t\t\ttime += timedelta(minutes=1)\n\t\tday += timedelta(days=1)\n\treturn the_times", "def database_extraction(mod_start_time,mod_end_time,instrument_chosen,subevent_bool,\r\n detect_previous_event = False,thresholds='100,1',\r\n one_thresh = False):\r\n obs_file_created = False\r\n\r\n #extending time window\r\n window_end_time = (mod_end_time + timedelta(days=2))\r\n window_start_time = (mod_start_time - timedelta(days=2))\r\n \r\n #making a list of all dates within window\r\n day_list=[]\r\n for d in range(10):\r\n day_list.append((window_start_time + timedelta(days=d)).date())\r\n print('day list = %s' %day_list)\r\n \r\n print('determining if an instrument has been chosen')\r\n\r\n if instrument_chosen:\r\n #if an instrument has been chosen, checking to make sure it still works for this date\r\n if inst_end < window_end_time:\r\n instrument_chosen = False\r\n else:\r\n #if insturment hasn't been chosen, figuring out what it should be for given date\r\n try:\r\n #if instrument is specified in cfg using that\r\n instrument = cfg.instrument\r\n inst_end = datetime.today()\r\n print('using %s as our instrument for observations' %instrument)\r\n instrument_chosen = True\r\n\r\n except:\r\n #choosing instrument using function if not given in cfg\r\n instrument_stuff = choose_prime_inst(window_start_time.date(),\r\n window_end_time.date())\r\n instrument = instrument_stuff[0]\r\n #figuring out how long we can use this instrument\r\n inst_end = instrument_stuff[1]\r\n instrument_chosen = True\r\n \r\n #running katie's code to extract data using chosen instrument and dates\r\n print('extracting data from GOES website')\r\n \r\n #running for only one threshold if one_thresh is true, otherwise running for default\r\n #thresholds as well as any additional threshold given\r\n if one_thresh:\r\n one_sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds) \r\n print('ran for threshold %s' %thresholds)\r\n else:\r\n if 
subevent_bool:\r\n thresholds = '10,1'\r\n #if event is a subevent, changing the threshold in katie's code to\r\n #10 MeV > 1pfu so that it will be recorded\r\n print('********************SUBEVENT**************************')\r\n sep.run_all(str(window_start_time), str(window_end_time), str(instrument),\r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n print('ran for subevent')\r\n else:\r\n #if an event, running with usual thresholds\r\n print('********************EVENT*****************************')\r\n sep.run_all(str(window_start_time), str(window_end_time),str(instrument), \r\n 'integral', '', '', True, detect_previous_event, thresholds)\r\n \r\n #reloading function so it doesn't keep old data \r\n reload(sep)\r\n \r\n #reformatting csv created from katie's code to json\r\n print('extracted - reformatting') \r\n for day in day_list: \r\n if not obs_file_created:\r\n #checking each day within the window to find the csv file if it hasn't\r\n #already been found\r\n print('thresholds: %s' %thresholds)\r\n \r\n if one_thresh:\r\n #name includes threshold if only ran for one threshold\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_gt' +\r\n str(thresholds).split(',')[0] + '_' + str(thresholds).split(',')[1] + 'pfu_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n else:\r\n #otherwise only includes date ran for\r\n new_obs_name = ('sep_values_' + str(instrument) + '_integral_' +\r\n day.strftime('%Y_%m_%d').replace('_0','_') + '.csv')\r\n \r\n print('new_os_name %s' %new_obs_name) \r\n \r\n #checking if that file exists\r\n if os.path.exists(katies_path / new_obs_name):\r\n #if a file with this date exists, creating the corresponding json file\r\n \r\n #json name\r\n if one_thresh:\r\n obs_name = (str(instrument) + '_' + str(day) + 'only_' + str(thresholds).split(',')[0] + 'MeV_event.json')\r\n else:\r\n obs_name = (str(instrument) + '_' +\r\n str(day) + '.json')\r\n #creating json file\r\n obs_csv2json((katies_path / new_obs_name), obs_name,\r\n (ref_path/'example_sepscoreboard_json_file_v20190228.json'),\r\n instrument)\r\n \r\n print('obs file created')\r\n #file is created - will not run for anymore dates within window\r\n obs_file_created = True\r\n \r\n return(obs_name)\r\n else:\r\n print('no csv file found with this date, checking next one')", "def get_events(\n events_root,\n gcd_dir,\n start=None,\n stop=None,\n step=None,\n agg_start=None,\n agg_stop=None,\n agg_step=None,\n truth=None,\n photons=None,\n pulses=None,\n recos=None,\n triggers=None,\n angsens_model=None,\n hits=None,\n hit_charge_quant=None,\n min_hit_charge=None,\n):\n if isinstance(events_root, string_types):\n events_roots = [expand(events_root)]\n else:\n if not isinstance(events_root, Iterable):\n raise TypeError(\"`events_root` must be string or iterable thereof\")\n events_roots = []\n for events_root_ in events_root:\n if not isinstance(events_root_, string_types):\n raise TypeError(\n \"Each value in an iterable `events_root` must be a string\"\n )\n events_roots.append(expand(events_root_))\n\n slice_kw = dict(start=start, stop=stop, step=step)\n\n if agg_start is None:\n agg_start = 0\n else:\n agg_start_ = int(agg_start)\n assert agg_start_ == agg_start\n agg_start = agg_start_\n\n if agg_step is None:\n agg_step = 1\n else:\n agg_step_ = int(agg_step)\n assert agg_step_ == agg_step\n agg_step = agg_step_\n\n assert agg_start >= 0\n assert agg_step >= 1\n\n if agg_stop is not None:\n assert agg_stop > agg_start >= 0\n agg_stop_ = int(agg_stop)\n assert 
agg_stop_ == agg_stop\n agg_stop = agg_stop_\n\n if truth is not None and not isinstance(truth, bool):\n raise TypeError(\"`truth` is invalid type: {}\".format(type(truth)))\n\n if photons is not None and not isinstance(photons, (string_types, Iterable)):\n raise TypeError(\"`photons` is invalid type: {}\".format(type(photons)))\n\n if pulses is not None and not isinstance(pulses, (string_types, Iterable)):\n raise TypeError(\"`pulses` is invalid type: {}\".format(type(pulses)))\n\n if recos is not None and not isinstance(recos, (string_types, Iterable)):\n raise TypeError(\"`recos` is invalid type: {}\".format(type(recos)))\n\n if triggers is not None and not isinstance(triggers, (string_types, Iterable)):\n raise TypeError(\"`triggers` is invalid type: {}\".format(type(triggers)))\n\n if hits is not None and not isinstance(hits, string_types):\n raise TypeError(\"`hits` is invalid type: {}\".format(type(hits)))\n\n if hits is not None:\n if hit_charge_quant is None:\n raise ValueError(\n \"`hit_charge_quant` must be specified if `hits` is specified\"\n )\n if min_hit_charge is None:\n raise ValueError(\n \"`min_hit_charge` must be specified if `hits` is specified\"\n )\n\n agg_event_idx = -1\n for events_root in events_roots:\n for dirpath, dirs, files in walk(events_root, followlinks=True):\n dirs.sort(key=nsort_key_func)\n\n if \"events.npy\" not in files:\n continue\n\n file_iterator_tree = OrderedDict()\n\n num_events, event_indices, headers = iterate_file(\n join(dirpath, 'events.npy'), **slice_kw\n )\n\n meta = OrderedDict(\n [\n (\"events_root\", dirpath),\n (\"num_events\", num_events),\n (\"event_idx\", None),\n (\"agg_event_idx\", None),\n ]\n )\n\n event_indices_iter = iter(event_indices)\n file_iterator_tree['header'] = iter(headers)\n\n # -- Translate args with defaults / find dynamically-specified things -- #\n\n if truth is None:\n truth_ = isfile(join(dirpath, 'truth.npy'))\n else:\n truth_ = truth\n\n if photons is None:\n dpath = join(dirpath, 'photons')\n if isdir(dpath):\n photons_ = [splitext(d)[0] for d in listdir(dpath)]\n else:\n photons_ = False\n elif isinstance(photons, string_types):\n photons_ = [photons]\n else:\n photons_ = photons\n\n if pulses is None:\n dpath = join(dirpath, 'pulses')\n if isdir(dpath):\n pulses_ = [splitext(d)[0] for d in listdir(dpath) if 'TimeRange' not in d]\n else:\n pulses_ = False\n elif isinstance(pulses, string_types):\n pulses_ = [pulses]\n else:\n pulses_ = list(pulses)\n\n if recos is None:\n dpath = join(dirpath, 'recos')\n if isdir(dpath):\n # TODO: make check a regex including colons, etc. 
so we don't\n # accidentally exclude a valid reco that starts with \"slc\"\n recos_ = []\n for fname in listdir(dpath):\n if fname[:3] in (\"slc\", \"evt\"):\n continue\n fbase = splitext(fname)[0]\n if fbase.endswith(\".llhp\"):\n continue\n recos_.append(fbase)\n else:\n recos_ = False\n elif isinstance(recos, string_types):\n recos_ = [recos]\n else:\n recos_ = list(recos)\n\n if triggers is None:\n dpath = join(dirpath, 'triggers')\n if isdir(dpath):\n triggers_ = [splitext(d)[0] for d in listdir(dpath)]\n else:\n triggers_ = False\n elif isinstance(triggers, string_types):\n triggers_ = [triggers]\n else:\n triggers_ = list(triggers)\n\n # Note that `hits_` must be defined after `pulses_` and `photons_`\n # since `hits_` is one of these\n if hits is None:\n if pulses_ is not None and len(pulses_) == 1:\n hits_ = ['pulses', pulses_[0]]\n elif photons_ is not None and len(photons_) == 1:\n hits_ = ['photons', photons_[0]]\n elif isinstance(hits, string_types):\n hits_ = hits.split('/')\n else:\n raise TypeError(\"{}\".format(type(hits)))\n\n # -- Populate the file iterator tree -- #\n\n if truth_:\n num_truths, _, truths = iterate_file(\n fpath=join(dirpath, 'truth.npy'), **slice_kw\n )\n assert num_truths == num_events\n file_iterator_tree['truth'] = iter(truths)\n\n if photons_:\n photons_ = sorted(photons_)\n file_iterator_tree['photons'] = iterators = OrderedDict()\n for photon_series in photons_:\n num_phs, _, photon_serieses = iterate_file(\n fpath=join(dirpath, 'photons', photon_series + '.pkl'), **slice_kw\n )\n assert num_phs == num_events\n iterators[photon_series] = iter(photon_serieses)\n\n if pulses_:\n file_iterator_tree['pulses'] = iterators = OrderedDict()\n for pulse_series in sorted(pulses_):\n num_ps, _, pulse_serieses = iterate_file(\n fpath=join(dirpath, 'pulses', pulse_series + '.pkl'), **slice_kw\n )\n assert num_ps == num_events\n iterators[pulse_series] = iter(pulse_serieses)\n\n num_tr, _, time_ranges = iterate_file(\n fpath=join(\n dirpath,\n 'pulses',\n pulse_series + 'TimeRange' + '.npy'\n ),\n **slice_kw\n )\n assert num_tr == num_events\n iterators[pulse_series + 'TimeRange'] = iter(time_ranges)\n\n if recos_:\n file_iterator_tree['recos'] = iterators = OrderedDict()\n for reco in sorted(recos_):\n num_recoses, _, recoses = iterate_file(\n fpath=join(dirpath, 'recos', reco + '.npy'), **slice_kw\n )\n assert num_recoses == num_events\n iterators[reco] = iter(recoses)\n\n if triggers_:\n file_iterator_tree['triggers'] = iterators = OrderedDict()\n for trigger_hier in sorted(triggers_):\n num_th, _, trigger_hiers = iterate_file(\n fpath=join(dirpath, 'triggers', trigger_hier + '.pkl'), **slice_kw\n )\n assert num_th == num_events\n iterators[trigger_hier] = iter(trigger_hiers)\n\n if hits_ is not None and hits_[0] == 'photons':\n angsens_model, _ = load_angsens_model(angsens_model)\n else:\n angsens_model = None\n\n while True:\n try:\n event = extract_next_event(file_iterator_tree)\n except StopIteration:\n break\n\n if hits_ is not None:\n hits_array, hits_indexer, hits_summary = get_hits(\n event=event,\n path=hits_,\n hit_charge_quant=hit_charge_quant,\n min_hit_charge=min_hit_charge,\n angsens_model=angsens_model,\n )\n event['hits'] = hits_array\n event['hits_indexer'] = hits_indexer\n event['hits_summary'] = hits_summary\n\n agg_event_idx += 1\n\n event.meta = deepcopy(meta)\n event.meta[\"event_idx\"] = next(event_indices_iter)\n event.meta[\"agg_event_idx\"] = agg_event_idx\n\n if agg_stop is not None and agg_event_idx >= agg_stop:\n return\n\n if 
agg_event_idx < agg_start or (agg_event_idx - agg_start) % agg_step != 0:\n continue\n\n yield event\n\n for key in list(file_iterator_tree.keys()):\n del file_iterator_tree[key]\n del file_iterator_tree", "def window_index_time(t,windowsize,overlap):\r\n \r\n try:\r\n t=t.tolist()\r\n except:\r\n t=t\r\n \r\n t1=t[0]\r\n t2=t1 + timedelta(seconds=windowsize)\r\n pt1=[0]\r\n pt2=[othertime.findNearest(t2,t)]\r\n while t2 < t[-1]:\r\n t1 = t2 - timedelta(seconds=overlap)\r\n t2 = t1 + timedelta(seconds=windowsize)\r\n\r\n pt1.append(othertime.findNearest(t1,t))\r\n pt2.append(othertime.findNearest(t2,t))\r\n \r\n return pt1, pt2", "def process_events_optimised(self, events_chuck_size, data_chunk_size):\n ev = self.events\n tz = self.args[\"timezone\"]\n indexer = ev.Time.str.contains(\"\\d\\d:\\d\\d\", regex=True, na=False)\n timed_events, several_days_events = ev[indexer], ev[~indexer]\n \n if not several_days_events.empty:\n several_days_events.to_csv(\"special_events.csv\", index=False)\n self.log(\"[+] Special events were saved into standalone CSV-file\")\n else:\n self.log(\"[!] Special events not found\")\n\n self.data = pd.read_csv(self.args[\"data\"],\n iterator=True, chunksize=data_chunk_size)\n\n self.log(\"[.] Events and data linking...\")\n\n start, end = 0, events_chuck_size\n relevant_dates = pd.DataFrame()\n count = 1\n while True:\n events_slice = timed_events.iloc[start:end]\n # TODO: remove in release version\n # events_slice.to_csv('slice_{}_{}.csv'.format(start, end),\n # index=False)\n\n if events_slice.empty:\n break\n\n first_date, first_time = events_slice[['Date', 'Time']].iloc[0]\n lower_bound = convert(first_date + \" \" + first_time, mode='date')\n lower_bound += timedelta(hours=tz, minutes=-1)\n\n last_date, last_time = events_slice[['Date', 'Time']].iloc[-1]\n upper_bound = convert(last_date + \" \" + last_time, mode='date')\n upper_bound += timedelta(hours=tz, minutes=5)\n \n self.log(\"[.] Events slice bounded by [%s; %s] is in processing...\",\n lower_bound, upper_bound)\n \n for chunk in self.data:\n bounds = (lower_bound, upper_bound)\n linked, rest = self._process_chuck(\n chunk, bounds, events_slice, relevant_dates)\n\n relevant_dates = rest\n\n if linked is None:\n if relevant_dates.empty:\n err = \"[!] Warning: events from %d to %d have no data\"\n self.log(err, start + 1, end)\n break\n else:\n continue\n\n if linked.empty:\n err = \"[!] Warning: linked dataframe is empty\"\n self.log(err, severe=True)\n continue\n\n self.log(\"[+] Events from %d to %d were linked. 
\"\n \"Dataframe size: %d\", start + 1, end, linked.shape[0])\n\n filename = 'linked_events_{}_to_{}.csv'.format(start + 1, end)\n filename = os.path.join(self.args[\"output_folder\"], filename)\n linked.to_csv(filename, index=False)\n linked = pd.DataFrame()\n break\n\n count += 1\n start = end\n end += events_chuck_size", "def events_between(self, starting_measure, starting_offset, ending_measure, ending_offset):\n output_events = []\n for i in range(starting_measure - 1, ending_measure - 1 + 1):\n for event in self.event_groups[i].events:\n if i == starting_measure - 1:\n if i == 0 and event.offset >= starting_offset:\n output_events.append(event)\n elif i != 0 and event.offset > starting_offset:\n output_events.append(event)\n elif i == ending_measure - 1:\n if event.offset < ending_offset and ending_offset != 0:\n output_events.append(event)\n else:\n output_events.append(event)\n return output_events", "def construct_event(date_list, timeformat, dateformat, longdateformat,\n datetimeformat, longdatetimeformat, defaulttz,\n defaulttimelen=60, defaultdatelen=1, encoding='utf-8',\n _now=datetime.now):\n today = datetime.today()\n\n all_day = False\n\n # looking for start datetime\n try:\n # first two elements are a date and a time\n dtstart = datetimefstr(date_list, datetimeformat, longdatetimeformat)\n except ValueError:\n try:\n # first element is a time\n dtstart = timefstr(date_list, timeformat)\n except ValueError:\n try:\n # first element is a date (and since second isn't a time this\n # is an all-day-event\n dtstart = datetimefstr(date_list, dateformat, longdateformat)\n all_day = True\n except ValueError:\n raise\n\n # now looking for the end\n if all_day:\n try:\n # second element must be a date, too\n dtend = datetimefstr(date_list, dateformat, longdateformat)\n dtend = dtend + timedelta(days=1)\n except ValueError:\n # if it isn't we expect it to be the summary and use defaultdatelen\n # as event length\n dtend = dtstart + timedelta(days=defaultdatelen)\n # test if dtend's year is this year, but dtstart's year is not\n if dtend.year == today.year and dtstart.year != today.year:\n dtend = datetime(dtstart.year, *dtend.timetuple()[1:6])\n\n if dtend < dtstart:\n dtend = datetime(dtend.year + 1, *dtend.timetuple()[1:6])\n\n else:\n try:\n # next element datetime\n dtend = datetimefstr(date_list, datetimeformat, longdateformat)\n except ValueError:\n try:\n # next element time only\n dtend = timefstr(date_list, timeformat)\n dtend = datetime(*(dtstart.timetuple()[:3] + dtend.timetuple()[3:5]))\n except ValueError:\n dtend = dtstart + timedelta(minutes=defaulttimelen)\n\n if dtend < dtstart:\n dtend = datetime(*dtstart.timetuple()[0:3] +\n dtend.timetuple()[3:5])\n if dtend < dtstart:\n dtend = dtend + timedelta(days=1)\n if all_day:\n dtstart = dtstart.date()\n dtend = dtend.date()\n\n else:\n try:\n # next element is a valid Olson db timezone string\n dtstart = pytz.timezone(date_list[0]).localize(dtstart)\n dtend = pytz.timezone(date_list[0]).localize(dtend)\n date_list.pop(0)\n except (pytz.UnknownTimeZoneError, UnicodeDecodeError):\n dtstart = defaulttz.localize(dtstart)\n dtend = defaulttz.localize(dtend)\n\n event = icalendar.Event()\n text = ' '.join(date_list).decode(encoding)\n summary = text.split(' :: ',1)[0]\n\n try:\n description = text.split(' :: ',1)[1]\n event.add('description',description)\n except IndexError:\n pass\n\n event.add('dtstart', dtstart)\n event.add('dtend', dtend)\n event.add('dtstamp', _now())\n event.add('summary', summary)\n event.add('uid', 
generate_random_uid())\n return event", "def find_time_boundaries(indices, times, drop_single_idx=False):\n\n ## If times are not the same size as indices, assume these times are for all recordings\n ## and the recording time for IDX NUM is times[NUM] (ie. idx 5 was recorded at times[5])\n if len(times) != len(indices):\n times = np.array(times)[np.array(indices)]\n\n ## Since list slicing counts up to but not including ends, we need to add 1 to all detected end locations\n ends = np.where(np.diff(indices) > 1)[0] + 1\n\n ## Starts and ends will equal each other since list slicing includes start values, but start needs 0 appended\n starts = np.copy(ends)\n if len(starts) == 0 or starts[0] != 0:\n starts = np.insert(starts, 0, 0)\n\n ## np.diff returns an array one smaller than the indices list, so we need to add the last idx to the ends\n if len(ends) == 0 or ends[-1] != len(indices):\n ends = np.insert(ends, len(ends), len(indices))\n\n ## Loop through all continuous idx start & end to see if any are too small (length = 1)\n time_boundaries = []\n for start, end in zip(starts, ends):\n if end - start < 2:\n if not drop_single_idx:\n raise PipelineException(f\"Disconnected index found at index {start}\")\n else:\n bounds = [np.nanmin(times[start:end]), np.nanmax(times[start:end])]\n time_boundaries.append(bounds)\n\n return time_boundaries", "def draw_around_event(power,events,borders,eventName,maxY=1200):\n event_consider = events[events['eventName']==eventName].reset_index(drop=True)\n print(\"number of\", eventName ,\"in groudtruth=\",len(event_consider))\n i = 0\n while(i<len(event_consider)):\n date = time.mktime(datetime.strptime(event_consider['time'][i], \"%Y-%m-%d %H:%M:%S\").timetuple())\n start = str(datetime.fromtimestamp(date-borders[0]))\n end = str(datetime.fromtimestamp(date+borders[1]))\n print(date,start,end)\n i += 1\n serie = Series.from_array(power[(power['time']>=start)&(power['time']<=end)]['value'])\n if len(serie)>0:\n v = [serie.index[0], serie.index[len(serie)-1], 0, maxY]#xmin,xmax,ymin,ymax\n pyplot.figure(figsize=(20, 5))\n pyplot.plot(serie,'ro')\n pyplot.axis(v)\n pyplot.show()\n else:\n print(\"No data of power for this event\")", "def get_data_for_events(self,event_list,start_end = (-1000,2000),channels = None):\n assert len(event_list)>0, \"Event list is not a list\"\n assert len(event_list)<10000, \"Only up to 10000 timepoints support right now\"\n assert start_end[0]<start_end[1], \"Incorrect values for start and end\"\n \n start = start_end[0]\n end = start_end[1]\n event_list = [int(x) for x in event_list]\n if (channels == None):\n channels = range(self.num_channels)\n channels.sort()\n rv = n.zeros((end-start,len(channels),len(event_list)),\"d\")\n arb = n.zeros((self.num_channels),n.bool)\n for c in channels:\n arb[c]=True\n \n for i,t in enumerate(event_list):\n if t+start<0 or t+end>self.num_datapoints:\n raise IndexError(\"Cannot get data from %i to %i\" % (t+start,t+end) )\n rv[:,:,i] = self[t+start:t+end,arb]\n return rv", "def preprocess_events(events):\n events = events[events['type'] != 'system']\n events.is_copy = False\n \n # set the time interval between events\n t = np.array(events['seconds']) \n t_plus_1 = np.append(t[1:(len(t))],t[len(t)-1])\n t = t_plus_1 - t\n events['interval'] = t\n \n # Create an identifier for the minute of the event, usefull when creating\n # the sessions\n dt = [time.strptime(d, '%Y-%m-%d %H:%M:%S') for d in events['datetime']]\n events['minute'] = [get_minute(d) for d in dt]\n \n # label interruptions and 
sessions\n interruptions = []\n sessions = []\n intervals = t\n s_id = 0\n for i in range(0,len(events)):\n sessions.append(s_id)\n if intervals[i] >= SPACE_BETWEEN_INT:\n s_id += 1\n interruptions.append(False)\n continue\n \n if intervals[i] >= SIZE_INT:\n interruptions.append(True)\n else:\n interruptions.append(False)\n \n events['is_interruption'] = interruptions\n events['session_id'] = sessions\n \n return events", "def process_events(lines):\n event_regex = re.compile(r'^\\[(\\d{4})-(\\d{2})-(\\d{2}) (\\d{2}):(\\d{2})\\] (.+)', re.MULTILINE)\n guard_regex = re.compile(r'Guard #(\\d+)')\n\n # The loop needs to remember the last guard ID it found so it knows who wake/sleep events should belong to\n guard_id = None\n\n events = []\n guard_ids = set()\n\n # findall gets every match from the string and returns a list of lists containing only the matching groups\n # The MULTILINE flag is important\n for year, month, day, hour, minute, event_description in event_regex.findall(lines):\n # Use the single-match function to see if there's a guard ID (implying it's a SHIFT_BEGIN)\n id_match = guard_regex.match(event_description)\n\n if id_match:\n guard_id = int(id_match.group(1))\n guard_ids.add(guard_id)\n event_type = EventType.SHIFT_BEGIN\n elif event_description == 'wakes up':\n event_type = EventType.WAKE\n else:\n event_type = EventType.SLEEP\n\n events.append(ScheduleEvent(datetime(int(year), int(month), int(day), hour=int(hour), minute=int(minute)),\n guard_id, event_type))\n\n return guard_ids, events", "def convert_clocks_time_to_time(\n scan_key: dict,\n time_boundaries,\n source_type: str,\n target_type: str,\n return_interpolate: bool = False,\n drop_single_idx: bool = True,\n debug: bool = True,\n):\n\n ##\n ## Fetch source and target times, along with converting between Stimulus or Behavior clock if needed\n ##\n\n source_times, target_times = fetch_timing_data(\n scan_key, source_type, target_type, debug\n )\n\n ##\n ## Check if None is used to set to full length of signal or fix common error of not having a list of lists\n ##\n\n if time_boundaries is None:\n time_start = np.nanmin(source_times)\n time_stop = np.nanmax(source_times)\n time_boundaries = [[time_start, time_stop]]\n elif isinstance(time_boundaries[0], numbers.Number):\n time_boundaries = [time_boundaries]\n\n ##\n ## Convert source indices to time boundaries, then convert time boundaries into target indices\n ##\n\n target_indices = []\n single_idx_count = 0\n\n ## Loop through start & end times and create list of indices corresponding to that block of time\n for [start, end] in time_boundaries:\n target_idx = np.where(\n np.logical_and(target_times >= start, target_times <= end)\n )[0]\n if len(target_idx) < 2:\n if drop_single_idx:\n single_idx_count += 1\n else:\n msg = (\n f\"Event of length {len(target_idx)} found. \"\n f\"Set drop_single_idx to True to suppress these errors.\"\n )\n raise PipelineException(msg)\n else:\n target_indices.append(target_idx)\n\n if debug:\n print(f\"Indices converted. 
{single_idx_count} events of length 0 or 1 dropped.\")\n\n ##\n ## Interpolate related signal if requested, else return target times.\n ##\n\n if return_interpolate:\n\n ## Create full interpolated signal\n interpolated_signal = interpolate_signal_data(\n scan_key, source_type, target_type, source_times, target_times, debug=debug\n )\n\n ## Split indices given into fragments based on which ones are continuous (incrementing by 1)\n source_signal_fragments = []\n for idx_fragment in target_indices:\n source_signal_fragments.append(interpolated_signal[idx_fragment])\n\n ## If full signal is converted, remove wrapping list\n if len(source_signal_fragments) == 1:\n source_signal_fragments = source_signal_fragments[0]\n\n return source_signal_fragments\n\n else:\n\n ## Convert indices to times and return\n source_times_to_target_times = []\n\n for target_idx_list in target_indices:\n source_times_to_target_times.append(target_times[target_idx_list])\n\n return source_times_to_target_times", "def compareEvents(self, e1, e2, workStart, workEnd, nowDay, nowHour, nowMinute, timeEst):\n estTimeMin = timeEst * 60\n estMins = estTimeMin % 60\n estHours = (estTimeMin - estMins) / 60\n openTime = self.openTimeWindow(workStart, workEnd)\n\n sameDay = (e1.day == e2.day)\n\n #for time in morning before eventTime\n morningTime = (e2.hour * 60 + e2.minute) - (workStart)\n enoughMorningTime = morningTime >= estTimeMin + 30\n\n # For events on the same day\n timeDiff = (e2.hour * 60 + e2.minute) - (e1.hour * 60 + e1.minute)\n enoughTime = timeDiff >= (estTimeMin + 30)\n\n # For events on different days\n timeDiff2 = (1440 - (e1.hour * 60) + e1.minute) + (e2.hour * 60 + e2.minute)\n enoughTime2 = timeDiff2 >= (estTimeMin + 30)\n\n # Ensures that the algorithm doesn't schedule events in the past\n now = ((e1.hour == nowHour) and (e1.minute >= nowMinute)) or e1.hour > nowHour or (e1.day > nowDay)\n\n # Ensures that the entire scheduled event would be within the open working hours\n timeWindow = (e1.hour * 60) + e1.minute + (estTimeMin + 30)\n\n if(now and (sameDay and enoughTime and ((e1.hour*60) in openTime) and (timeWindow in openTime))):\n time = timeSlot.afterTimeSlot(e1)\n availableTimes.append(time)\n\n if(now and (not sameDay and enoughTime2 and ((e1.hour*60) in openTime) and (timeWindow in openTime))):\n time = timeSlot.afterTimeSlot(e1)\n availableTimes.append(time)\n\n if(not sameDay and enoughMorningTime):\n time = timeSlot.beforeTimeSlot(e2)\n availableTimes.append(time)", "def episode_triggers(rate, volume, uptime, start_mjd, end_mjd):\n nyears = (end_mjd - start_mjd)/365.2422\n n = num_events(rate, volume, uptime, nyears)\n events = sample_sphere(n, truncate=True)\n events['mjd'] = np.random.uniform(start_mjd, end_mjd, n)\n events['event_id'] = np.arange(n)\n events = events[['mjd', 'ra', 'decl']]\n events.sort_values('mjd', inplace=True)\n events['event_id'] = np.arange(n)\n return events", "def convert_clocks_time_to_idx(\n scan_key: dict,\n time_boundaries,\n source_type: str,\n target_type: str,\n return_interpolate: bool = False,\n drop_single_idx: bool = True,\n debug: bool = True,\n):\n\n ##\n ## Fetch source and target times, along with converting between Stimulus or Behavior clock if needed\n ##\n\n source_times, target_times = fetch_timing_data(\n scan_key, source_type, target_type, debug\n )\n\n ##\n ## Check if None is used to set to full length of signal or fix common error of not having a list of lists\n ##\n\n if time_boundaries is None:\n time_start = np.nanmin(source_times)\n 
time_stop = np.nanmax(source_times)\n time_boundaries = [[time_start, time_stop]]\n elif isinstance(time_boundaries[0], numbers.Number):\n time_boundaries = [time_boundaries]\n\n ##\n ## Convert source indices to time boundaries, then convert time boundaries into target indices\n ##\n\n target_indices = []\n single_idx_count = 0\n\n ## Loop through start & end times and create list of indices corresponding to that block of time\n for [start, end] in time_boundaries:\n target_idx = np.where(\n np.logical_and(target_times >= start, target_times <= end)\n )[0]\n if len(target_idx) < 2:\n if drop_single_idx:\n single_idx_count += 1\n else:\n msg = (\n f\"Event of length {len(target_idx)} found. \"\n f\"Set drop_single_idx to True to suppress these errors.\"\n )\n raise PipelineException(msg)\n else:\n target_indices.append(target_idx)\n\n if debug:\n print(f\"Indices converted. {single_idx_count} events of length 0 or 1 dropped.\")\n\n ##\n ## Interpolate related signal if requested, else just return the target_indices found.\n ##\n\n if return_interpolate:\n\n ## Create full interpolated signal\n interpolated_signal = interpolate_signal_data(\n scan_key, source_type, target_type, source_times, target_times, debug=debug\n )\n\n ## Split indices given into fragments based on which ones are continuous (incrementing by 1)\n source_signal_fragments = []\n for idx_fragment in target_indices:\n source_signal_fragments.append(interpolated_signal[idx_fragment])\n\n ## If full signal is converted, remove wrapping list\n if len(source_signal_fragments) == 1:\n source_signal_fragments = source_signal_fragments[0]\n\n return source_signal_fragments\n\n else:\n\n return target_indices", "def repeat(start, end, roi_times=None, timeres=2, coords=None, ar=None,\n split_temps=None, em_wlen=None, plotminmax=False, plotstd=False,\n hist_type='plain', loaddata=False):#, output=None):\n #if isinstance(output, str):\n # from matplotlib import use\n # use(output)\n import matplotlib.pyplot as plt\n import matplotlib.dates as mdates\n\n #loaddata = False\n\n print start, end\n start, end = parse(start), parse(end)\n \n s = []\n t = []\n p = []\n \n #if flares == []:\n # return s, t, p\n\n timerange = tr(start, end)\n delta = dt.timedelta(hours=timeres)\n ntimes = int(timerange.seconds()/delta.total_seconds())\n times = [time.start() for time in timerange.split(ntimes)]\n \n ntemps = 141\n tempsovertime = np.zeros((ntemps, ntimes))\n \n means = np.zeros(len(times))\n p95s = np.zeros(len(times))\n loopmeans = np.zeros(len(times))\n if plotminmax:\n maxes = np.zeros(len(times))\n mins = np.zeros(len(times))\n if plotstd:\n stds = np.zeros(len(times))\n loopstds = np.zeros(len(times))\n if em_wlen:\n meanem = np.zeros(len(times))\n if plotminmax:\n maxem = np.zeros(len(times))\n minem = np.zeros(len(times))\n if plotstd:\n stdem = np.zeros(len(times))\n\n for i, date in enumerate(times):\n data_only = True\n try:\n if ar == 'all':\n plotar = None\n else:\n plotar = ar\n results = output_maps(date, plotar, coords, 'data', split_temps,\n subimsize=50, calc_em=em_wlen, data_only=data_only)#True)#, linear=True)\n if isinstance(results, tuple):\n tempmap, emmap = results\n else:\n tempmap = results\n data = tempmap.data\n except DownloadError as de:\n data = np.zeros((512, 512))\n print de.msg\n except:\n print 'KHAAAAAAAN! 
Some part of the temperature-plotting process failed.'\n raise\n data = np.zeros((512, 512))\n if em_wlen:\n emmap = np.zeros((512, 512))\n \n t.append(np.nanmean(data))\n p.append(np.nanmax(data))\n \n data = data.flatten()\n data2 = data.copy()\n data2[data == 0.0] = np.NaN\n data2 = data2[np.isfinite(data)]\n data2.sort()\n temps, bins = np.histogram(data, bins=ntemps, density=False, range=(5.6, 7.0))\n temps = (temps/float(data.size))*100.0\n tempsovertime[:, i] = temps\n\n #loops = data[data >= split_temps]\n #data = data[data < split_temps]\n\n means[i] = np.nanmean(data2)\n try:\n p95s[i] = data2[round(0.95 * len(data2))-1]\n except IndexError:\n p95s[i] = np.NaN\n #loopmeans[i] = np.nanmean(loops)\n if plotminmax:\n maxes[i] = np.nanmax(data)\n mins[i] = np.nanmin(data)\n if em_wlen:\n maxem[i] = np.nanmax(emmap)\n minem[i] = np.nanmin(emmap)\n if plotstd:\n stds[i] = np.nanstd(data)\n if em_wlen:\n stdem[i] = np.nanstd(emmap)\n #loopstds[i] = np.nanstd(loops)\n \n tempsovertime[tempsovertime <= 0.1] = np.nan\n\n xmin, xmax = mdates.datestr2num([str(start), str(end)])\n fig = plt.figure(figsize=(36, 18))\n ax = fig.add_subplot(111, axisbg='k')\n plot_title = 'Temperature distribution of corona\\n{:%Y/%m/%d %H:%M} - {:%Y/%m/%d %H:%M}'.format(start, end)\n if roi_times:\n plot_title += '\\nRegion observed: {:%Y/%m/%d %H:%M} - {:%Y/%m/%d %H:%M}'.format(*roi_times)\n plt.title(plot_title)\n if hist_type == 'plain':\n plt.imshow(tempsovertime[30:106, :], extent=[xmin, xmax, 5.9, 6.65],\n aspect='auto', interpolation='none', origin='lower',\n cmap='coolwarm', vmin=np.nanmin(tempsovertime[65:106, :]),\n vmax=np.nanmax(tempsovertime[65:106, :]))\n elif hist_type == 'loops':\n plt.imshow(tempsovertime[65:106, :], extent=[xmin, xmax, 6.25, 6.65],\n aspect='auto', interpolation='none', origin='lower',\n cmap='coolwarm', vmin=np.nanmin(tempsovertime[65:106, :]),\n vmax=np.nanmax(tempsovertime[65:106, :]))\n elif hist_type == 'full':\n plt.imshow(tempsovertime, extent=[xmin, xmax, 5.6, 7.0],\n aspect='auto', interpolation='none', origin='lower',\n cmap='coolwarm', vmin=np.nanmin(tempsovertime),\n vmax=np.nanmax(tempsovertime))\n plt.tight_layout()\n ax.xaxis_date()\n fig.autofmt_xdate()\n plt.colorbar(orientation='horizontal')\n plt.savefig('/media/huw/temp-time_hists/distribution_over_time_{}'.format(ar))\n plt.close()\n\n\n means[np.where(means == 0.0)] = np.nan\n if plotstd:\n stds[np.where(stds == 0.0)] = np.nan\n loopstds[loopstds == 0.0] = np.nan\n\n try:\n tnums = mdates.date2num([ti for ti in times])\n print maxes\n print len(maxes)\n fig = plt.figure(figsize=(18, 14))\n ax = fig.add_subplot(111)\n plt.title('Variation of temperature over time; AR{}'.format(ar), \n fontsize=32)\n plt.plot(tnums, maxes, label='Maximum temperature', color='red')\n plt.axhline(np.nanmean(maxes))\n print tnums\n print len(tnums)\n ax.xaxis_date()\n fig.autofmt_xdate()\n plt.legend(loc=4, fontsize=16)\n plt.xlabel('Date', fontsize=24)\n plt.ylabel('log(T)', fontsize=24)\n #plt.savefig('/media/huw/temp_plots/temp_plot_{}_b'.format(ar))\n plt.savefig('/home/drew/Dropbox/ARs/temps_{}_b'.format(ar))\n plt.close()\n\n \"\"\"diff = ((maxes-p95s)/p95s)*100.0\n fig = plt.figure(figsize=(18, 14))\n ax = fig.add_subplot(1, 1, 1)\n plt.title('Percentage difference between max and 95th %-ile; AR{}'.format(ar), \n fontsize=32)\n plt.plot(tnums, diff, color='black')\n plt.scatter(fldates, [np.nanmax(diff)]*len(fldates))\n for flare in flares:\n ax.text(sunpy.time.parse_time(flare['event_peaktime']), np.nanmax(diff)+0.01, 
flare['fl_goescls'][0])\n ax.xaxis_date()\n fig.autofmt_xdate()\n plt.xlabel('Date', fontsize=24)\n plt.ylabel('log(T)', fontsize=24)\n #plt.savefig('/media/huw/temp_plots/temp_plot_{}'.format(ar))\n plt.savefig('Dropbox/ARs/diffs_{}'.format(ar))\n plt.close()\"\"\"\n \n except:# ValueError:\n print \"Can't plot the temperature graph because matplotlib is being a whiney douche\"\n print tnums\n raise\n\n return s, t, p, times", "def _set_window_time(slices, times):\n t_idx_ = [t[-1] for t in slices]\n return times[t_idx_]", "def _sliding_window(times, window, sfreq):\n import copy\n\n window = _DecodingTime(copy.deepcopy(window))\n\n # Default values\n time_slices = window.get('slices', None)\n # If the user hasn't manually defined the time slices, we'll define them\n # with ``start``, ``stop``, ``step`` and ``length`` parameters.\n if time_slices is None:\n window['start'] = window.get('start', times[0])\n window['stop'] = window.get('stop', times[-1])\n window['step'] = window.get('step', 1. / sfreq)\n window['length'] = window.get('length', 1. / sfreq)\n\n if not (times[0] <= window['start'] <= times[-1]):\n raise ValueError(\n 'start (%.2f s) outside time range [%.2f, %.2f].' % (\n window['start'], times[0], times[-1]))\n if not (times[0] <= window['stop'] <= times[-1]):\n raise ValueError(\n 'stop (%.2f s) outside time range [%.2f, %.2f].' % (\n window['stop'], times[0], times[-1]))\n if window['step'] < 1. / sfreq:\n raise ValueError('step must be >= 1 / sampling_frequency')\n if window['length'] < 1. / sfreq:\n raise ValueError('length must be >= 1 / sampling_frequency')\n if window['length'] > np.ptp(times):\n raise ValueError('length must be <= time range')\n\n # Convert seconds to index\n\n def find_t_idx(t): # find closest time point\n return np.argmin(np.abs(np.asarray(times) - t))\n\n start = find_t_idx(window['start'])\n stop = find_t_idx(window['stop'])\n step = int(round(window['step'] * sfreq))\n length = int(round(window['length'] * sfreq))\n\n # For each training slice, give time samples to be included\n time_slices = [range(start, start + length)]\n while (time_slices[-1][0] + step) <= (stop - length + 1):\n start = time_slices[-1][0] + step\n time_slices.append(range(start, start + length))\n window['slices'] = time_slices\n window['times'] = _set_window_time(window['slices'], times)\n return window", "def time_windows(baz, arriv_p, arriv_s, init_sec, is_local):\n\n # TIME WINDOWS (for arrivals and subplots)\n # Window lengths dependent on event distance\n if is_local == 'non-local':\n min_pw = arriv_p\n max_pw = min_pw + (arriv_s - arriv_p) // 4\n min_sw = arriv_s - 0.001 * (arriv_s - arriv_p)\n max_sw = arriv_s + 150\n min_lwi = surf_tts(baz[0], init_sec) - 20\n t1 = (baz[0]/1000000) * 50\n # window length grows 50 sec per 1000 km.\n max_lwi = min_lwi + t1\n min_lwf = max_lwi\n t2 = (baz[0]/1000000) * 60\n # window length grows 60 sec per 1000 km.\n max_lwf = min_lwf + t2\n elif is_local == 'local':\n min_pw = arriv_p\n max_pw = min_pw + 20\n min_sw = arriv_s - 5\n max_sw = min_sw + 20\n min_lwi = surf_tts(baz[0], init_sec) + 20\n max_lwi = min_lwi + 50\n min_lwf = max_lwi\n max_lwf = min_lwf + 80\n else:\n min_pw = arriv_p\n max_pw = min_pw + 7\n min_sw = arriv_s\n max_sw = min_sw + 7\n min_lwi = surf_tts(baz[0], init_sec) + 5\n max_lwi = min_lwi + 12\n min_lwf = max_lwi\n max_lwf = min_lwf + 80\n\n return min_pw, max_pw, min_sw, max_sw, min_lwi, max_lwi, min_lwf, max_lwf", "def gen_subevent_bools(p_10,p_100):\r\n #list of subevent booleans\r\n subevent_bools = 
[]\r\n \r\n #extracting 10 MeV peak flux if it exists\r\n for j in range(len(p_10)):\r\n try:\r\n p10 = float(p_10[j])\r\n except ValueError:\r\n p10 = 'nan'\r\n \r\n #extracting 100 MeV peak flux if it exists\r\n try:\r\n p100 = float(p_100[j])\r\n except ValueError:\r\n p100 = 'nan'\r\n \r\n #checking if peak fluxes exist\r\n if str(p10) != 'nan' and str(p100) != 'nan':\r\n #if the peak fluxes both exist and >10 MeV is both below threshold,\r\n #subevent is true (only care about >10 bc of definition of subevent)\r\n if p10 < 10:\r\n subevent_bools.append(True)\r\n elif p10 > 10:\r\n subevent_bools.append(False)\r\n \r\n #if >10 MeV doesn't exist, subevent is true\r\n else:\r\n subevent_bools.append(True)\r\n \r\n return(subevent_bools)", "def combine_events_and_locations(self, grid):\n # clean locations data\n df_locations_cleaned = utils.clean_locations_data(self.df_locations, self.start, self.end)\n # clean events data\n df_events_cleaned = utils.clean_events_data(self.df_events, self.start, self.end)\n # combine and sort\n df_both = pd.concat([df_locations_cleaned, df_events_cleaned])\n # also sort time_type since want trips to be first when time is tied\n df_both = df_both.sort_values(by=['time', 'time_type'], ascending=[True, False])\n # include grid data\n df_both['grid_coord'] = df_both.apply(lambda x: grid.locate_point((x.lat, x.lng)), axis=1)\n df_both['grid_id'] = df_both.apply(lambda x: grid.get_cells()[x['grid_coord']].get_id(), axis=1)\n return self.find_cum_sum(df_both)", "def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event', tz=utc, from_categ=None, grouped=True,\n includible=lambda item: True):\n day_start = start_dt.astimezone(utc)\n day_end = end_dt.astimezone(utc)\n dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)\n\n items = defaultdict(lambda: defaultdict(list))\n\n # first of all, query TimetableEntries/events that fall within\n # specified range of dates (and category set)\n events = _query_events(categ_ids, day_start, day_end)\n if from_categ:\n events = events.filter(Event.is_visible_in(from_categ.id))\n for eid, tt_start_dt in events:\n if tt_start_dt:\n items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)\n else:\n items[eid] = None\n\n # then, retrieve detailed information about the events\n event_ids = set(items)\n query = (Event.query\n .filter(Event.id.in_(event_ids))\n .options(subqueryload(Event.person_links).joinedload(EventPersonLink.person),\n joinedload(Event.own_room).noload('owner'),\n joinedload(Event.own_venue),\n joinedload(Event.category).undefer('effective_icon_data'),\n undefer('effective_protection_mode')))\n scheduled_events = defaultdict(list)\n ongoing_events = []\n events = []\n for e in query:\n if not includible(e):\n continue\n if grouped:\n local_start_dt = e.start_dt.astimezone(tz).date()\n local_end_dt = e.end_dt.astimezone(tz).date()\n if items[e.id] is None:\n # if there is no TimetableEntry, this means the event has not timetable on that interval\n for day in iterdays(max(start_dt.date(), local_start_dt), min(end_dt.date(), local_end_dt)):\n # if the event starts on this date, we've got a time slot\n if day.date() == local_start_dt:\n scheduled_events[day.date()].append((e.start_dt, e))\n else:\n ongoing_events.append(e)\n else:\n for start_d, start_dts in items[e.id].items():\n scheduled_events[start_d].append((start_dts[0], e))\n else:\n events.append(e)\n\n # result['events'][date(...)] -> [(datetime(....), Event(...))]\n # 
result[event_id]['contribs'][date(...)] -> [(TimetableEntry(...), Contribution(...))]\n # result['ongoing_events'] = [Event(...)]\n if grouped:\n result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n else:\n result = defaultdict(lambda: defaultdict(list))\n\n result.update({\n 'events': scheduled_events if grouped else events,\n 'ongoing_events': ongoing_events\n })\n\n # according to detail level, ask for extra information from the DB\n if detail_level != 'event':\n query = _query_blocks(event_ids, dates_overlap, detail_level)\n if grouped:\n for b in query:\n start_date = b.timetable_entry.start_dt.astimezone(tz).date()\n result[b.session.event_id]['blocks'][start_date].append((b.timetable_entry, b))\n else:\n for b in query:\n result[b.session.event_id]['blocks'].append(b)\n\n if detail_level == 'contribution':\n query = (Contribution.query\n .filter(Contribution.event_id.in_(event_ids),\n dates_overlap(TimetableEntry),\n ~Contribution.is_deleted)\n .options(contains_eager(Contribution.timetable_entry),\n joinedload(Contribution.person_links))\n .join(TimetableEntry))\n if grouped:\n for c in query:\n start_date = c.timetable_entry.start_dt.astimezone(tz).date()\n result[c.event_id]['contribs'][start_date].append((c.timetable_entry, c))\n else:\n for c in query:\n result[c.event_id]['contributions'].append(c)\n\n query = (Break.query\n .filter(TimetableEntry.event_id.in_(event_ids), dates_overlap(TimetableEntry))\n .options(contains_eager(Break.timetable_entry))\n .join(TimetableEntry))\n if grouped:\n for b in query:\n start_date = b.timetable_entry.start_dt.astimezone(tz).date()\n result[b.timetable_entry.event_id]['breaks'][start_date].append((b.timetable_entry, b))\n else:\n for b in query:\n result[b.timetable_entry.event_id]['breaks'].append(b)\n return result", "def getEventData(bounds = None,radius=None,starttime = None,endtime = None,magrange = None,\n catalog = None,contributor = None,getComponents=False,\n getAngles=False,verbose=False,limitType=None,getAllMags=False,\n devServer=False):\n if catalog is not None and catalog not in checkCatalogs():\n raise Exception,'Unknown catalog %s' % catalog\n if contributor is not None and contributor not in checkContributors():\n raise Exception,'Unknown contributor %s' % contributor\n\n #Make sure user is not specifying bounds search AND radius search\n if bounds is not None and radius is not None:\n raise Exception,'Cannot choose bounds search AND radius search.'\n \n #start creating the url parameters\n urlparams = getEventParams(bounds,radius,starttime,endtime,magrange,\n catalog,contributor)\n\n #search parameters we're not making available to the user (yet)\n urlparams['orderby'] = 'time-asc'\n urlparams['format'] = 'geojson'\n params = urllib.urlencode(urlparams)\n eventlist = []\n if devServer:\n urlbase = URLBASE.replace(SERVER,DEVSERVER)\n else:\n urlbase = URLBASE\n url = urlbase % params\n fh = getURLHandle(url)\n #fh = urllib2.urlopen(url)\n feed_data = fh.read()\n fh.close()\n fdict = json.loads(feed_data)\n maxmags = 0\n for feature in fdict['features']:\n eventdict = OrderedDict()\n eventdict['id'] = [feature['id'],'%s']\n #eventdict['idlist'] = (feature['properties']['ids'].strip(',').split(','),'%s')\n if verbose:\n sys.stderr.write('Fetching data for event %s...\\n' % eventdict['id'])\n eventdict['time'] = [getUTCTimeStamp(feature['properties']['time']),'%s']\n eventdict['lat'] = [feature['geometry']['coordinates'][1],'%.4f']\n eventdict['lon'] = [feature['geometry']['coordinates'][0],'%.4f']\n depth 
= feature['geometry']['coordinates'][2]\n mag = feature['properties']['mag']\n if mag is None:\n mag = float('nan')\n if depth is None:\n depth = float('nan')\n eventdict['depth'] = [depth,'%.1f']\n eventdict['mag'] = [mag,'%g']\n eventdict['event-type'] = [feature['properties']['type'],'%s']\n \n if not getComponents and not getAngles and not getAllMags:\n eventlist.append(eventdict.copy())\n continue\n eurl = feature['properties']['detail']\n #eventdict['url'] = [eurl,'%s']\n fh = getURLHandle(eurl)\n #fh = urllib2.urlopen(eurl)\n data = fh.read()\n fh.close()\n #sys.stderr.write('%s - After reading %s\\n' % (ShakeDateTime.now(),url))\n edict = json.loads(data)\n #sometimes you find when you actually open the json for the event that it doesn't\n #REALLY have a moment tensor or focal mechanism, just delete messages for some that USED to be\n #there. Double-checking below.\n if edict['properties']['products'].has_key('moment-tensor'):\n hasMoment = edict['properties']['products']['moment-tensor'][0]['status'] != 'DELETE'\n else:\n hasMoment = False\n if edict['properties']['products'].has_key('focal-mechanism'):\n hasFocal = edict['properties']['products']['focal-mechanism'][0]['status'] != 'DELETE'\n else:\n hasFocal = False\n if getAllMags:\n if edict['properties']['products'].has_key('phase-data'):\n mags,magtypes,magsources = __getAllMagnitudes(edict['properties']['products']['phase-data'])\n else:\n mags,magtypes,magsources = __getAllMagnitudes(edict['properties']['products']['origin'])\n i = 1\n if len(mags) > maxmags:\n maxmags = len(mags)\n for mag,magtype,magsource in zip(mags,magtypes,magsources):\n eventdict['mag%i' % i] = [mag,'%.1f']\n eventdict['mag%i-source' % i] = [magsource,'%s']\n eventdict['mag%i-type' % i] = [magtype,'%s']\n #sys.stderr.write('Getting mag %i from event %s\\n' % (i,feature['id']))\n i += 1\n if getComponents:\n if hasMoment:\n mrr,mtt,mpp,mrt,mrp,mtp,mtype,mlat,mlon,mdepth,mduration = __getMomentComponents(edict,limitType)\n eventdict['mrr'] = [mrr,'%g']\n eventdict['mtt'] = [mtt,'%g']\n eventdict['mpp'] = [mpp,'%g']\n eventdict['mrt'] = [mrt,'%g']\n eventdict['mrp'] = [mrp,'%g']\n eventdict['mtp'] = [mtp,'%g']\n eventdict['type'] = [mtype,'%s']\n eventdict['moment-lat'] = [mlat,'%.4f']\n eventdict['moment-lon'] = [mlon,'%.4f']\n eventdict['moment-depth'] = [mdepth,'%.1f']\n eventdict['moment-duration'] = [mduration,'%.1f']\n else:\n eventdict['mrr'] = [NAN,'%g']\n eventdict['mtt'] = [NAN,'%g']\n eventdict['mpp'] = [NAN,'%g']\n eventdict['mrt'] = [NAN,'%g']\n eventdict['mrp'] = [NAN,'%g']\n eventdict['mtp'] = [NAN,'%g']\n eventdict['type'] = ['NA','%s']\n eventdict['moment-lat'] = [NAN,'%.4f']\n eventdict['moment-lon'] = [NAN,'%.4f']\n eventdict['moment-depth'] = [NAN,'%.1f']\n eventdict['moment-duration'] = [NAN,'%.1f']\n if getAngles:\n #sometimes there are delete products instead of real ones, fooling you into\n #thinking that there is really a moment tensor. 
Trapping for that here.\n if hasFocal or hasMoment:\n strike1,dip1,rake1,strike2,dip2,rake2 = __getFocalAngles(edict)\n eventdict['strike1'] = [strike1,'%.0f']\n eventdict['dip1'] = [dip1,'%.0f']\n eventdict['rake1'] = [rake1,'%.0f']\n eventdict['strike2'] = [strike2,'%.0f']\n eventdict['dip2'] = [dip2,'%.0f']\n eventdict['rake2'] = [rake2,'%.0f']\n else:\n eventdict['strike1'] = [NAN,'%.0f']\n eventdict['dip1'] = [NAN,'%.0f']\n eventdict['rake1'] = [NAN,'%.0f']\n eventdict['strike2'] = [NAN,'%.0f']\n eventdict['dip2'] = [NAN,'%.0f']\n eventdict['rake2'] = [NAN,'%.0f']\n eventlist.append(eventdict.copy())\n return (eventlist,maxmags)", "def subset_by_time(example_dict, first_time_unix_sec, last_time_unix_sec):\n\n error_checking.assert_is_integer(first_time_unix_sec)\n error_checking.assert_is_integer(last_time_unix_sec)\n error_checking.assert_is_geq(last_time_unix_sec, first_time_unix_sec)\n\n good_indices = numpy.where(numpy.logical_and(\n example_dict[VALID_TIMES_KEY] >= first_time_unix_sec,\n example_dict[VALID_TIMES_KEY] <= last_time_unix_sec\n ))[0]\n\n for this_key in ONE_PER_EXAMPLE_KEYS:\n if isinstance(example_dict[this_key], list):\n example_dict[this_key] = [\n example_dict[this_key][k] for k in good_indices\n ]\n else:\n example_dict[this_key] = (\n example_dict[this_key][good_indices, ...]\n )\n\n return example_dict, good_indices", "def filterEvents(intervals_dates,list_infected,distance):\n d=distance\n list_gpsevents=[]\n for z in range(len(intervals_dates)-1):\n print(\"Interval: \",intervals_dates[z], \"y\", intervals_dates[z+1])\n infected,uninfected=getTrazaTimestamp(intervals_dates[z],intervals_dates[z+1],GPSrecords,list_infected)\n events_gps = nearest_neighbor(infected, uninfected, d)\n events_gps = events_gps.drop(['geometry','closest_stop_geom'], axis=1)\n print(len(events_gps))\n if(len(events_gps)!=0):\n list_gpsevents.append(events_gps.reset_index(drop=True))\n else:\n events_gps=pd.DataFrame()\n list_gpsevents.append(events_gps)\n #GPSevents=pd.concat(list_gpsevents).reset_index(drop=True)\n #return GPSevents\n return list_gpsevents", "def exercise_blocks(events):\n events.sort(key=lambda e: (e.start_time, e.end_time))\n\n exercise_block = []\n block_start_event = None\n block_end_event = None\n\n events_with_dummy_exercise = events + [Event(event_type=Event.EXERCISE,\n start_time=datetime.datetime.max, end_time=datetime.datetime.max)]\n\n for event in events_with_dummy_exercise:\n if event.event_type != Event.EXERCISE:\n continue\n\n if not exercise_block:\n exercise_block.append(event)\n elif event.start_time - exercise_block[-1].end_time <= timedelta(hours=1):\n # Put the current event in the same exercise block.\n exercise_block.append(event)\n else:\n yield exercise_block\n # Start a new exercise block.\n exercise_block = [event]", "def test_shotgun():\n events = [['Event', '2017-11-22T11:30:00-08:00', '2017-11-22T12:10:00-08:00'],\n ['Event', '2017-11-22T12:00:00-08:00', '2017-11-22T13:00:00-08:00'],\n ['Event', '2017-11-22T12:30:00-08:00', '2017-11-22T13:30:00-08:00'],\n ['Event', '2017-11-23T10:00:00-08:00', '2017-11-23T11:20:00-08:00'],\n ['Event', '2017-11-23T14:00:00-08:00', '2017-11-23T15:00:00-08:00'],\n ['Event', '2017-11-24T14:30:00-08:00', '2017-11-25T19:00:00-08:00'],\n ['Event', '2017-11-25T12:00:00-08:00', '2017-11-25T13:00:00-08:00'],\n ['Event', '2017-11-26T11:30:00-08:00', '2017-11-26T12:10:00-08:00'],\n ['Event', '2017-11-26T12:30:00-08:00', '2017-11-26T13:30:00-08:00'],\n ['Event', '2017-11-28T10:00:00-08:00', 
'2017-11-28T11:20:00-08:00'],\n ['Event', '2017-11-28T12:00:00-08:00', '2017-11-28T13:00:00-08:00'],\n ['Event', '2017-11-28T14:00:00-08:00', '2017-11-28T15:00:00-08:00']]\n\n freetimes, _ = free(events, 9, 0, 17, 0, day_range, 30)\n fmt_freetime = output_format(freetimes)\n print(fmt_freetime)\n for i in fmt_freetime:\n print(i)\n assert fmt_freetime == ['Tue, Nov 21, 9:00 am to Tue, Nov 21, 5:00 pm.',\n 'Wed, Nov 22, 9:00 am to Wed, Nov 22, 11:30 am.',\n 'Wed, Nov 22, 1:30 pm to Wed, Nov 22, 5:00 pm.',\n 'Thu, Nov 23, 9:00 am to Thu, Nov 23, 10:00 am.',\n 'Thu, Nov 23, 11:20 am to Thu, Nov 23, 2:00 pm.',\n 'Thu, Nov 23, 3:00 pm to Thu, Nov 23, 5:00 pm.',\n 'Fri, Nov 24, 9:00 am to Fri, Nov 24, 2:30 pm.',\n 'Sun, Nov 26, 9:00 am to Sun, Nov 26, 11:30 am.',\n 'Sun, Nov 26, 1:30 pm to Sun, Nov 26, 5:00 pm.',\n 'Mon, Nov 27, 9:00 am to Mon, Nov 27, 5:00 pm.',\n 'Tue, Nov 28, 9:00 am to Tue, Nov 28, 10:00 am.',\n 'Tue, Nov 28, 11:20 am to Tue, Nov 28, 12:00 pm.',\n 'Tue, Nov 28, 1:00 pm to Tue, Nov 28, 2:00 pm.']", "def schedule_maker(dates, locator, list_uses):\n\n def get_yearly_vectors(dates, occ_schedules, el_schedules, dhw_schedules, pro_schedules, month_schedule):\n \"\"\"\n For a given use type, this script generates yearly schedules for occupancy, electricity demand,\n hot water demand, process electricity demand based on the daily and monthly schedules obtained from the\n archetype database.\n\n :param dates: dates and times throughout the year\n :type dates: DatetimeIndex\n :param occ_schedules: occupancy schedules for a weekdays, Saturdays and Sundays from the archetype database\n :type occ_schedules: list[array]\n :param el_schedules: electricity schedules for a weekdays, Saturdays and Sundays from the archetype database\n :type el_schedules: list[array]\n :param dhw_schedules: domestic hot water schedules for a weekdays, Saturdays and Sundays from the archetype\n database\n :type dhw_schedules: list[array]\n :param pro_schedules: process electricity schedules for a weekdays, Saturdays and Sundays from the archetype\n database\n :type pro_schedules: list[array]\n :param month_schedule: monthly schedules from the archetype database\n :type month_schedule: ndarray\n\n :return occ: occupancy schedule for each hour of the year\n :type occ: list[float]\n :return el: electricity schedule for each hour of the year\n :type el: list[float]\n :return dhw: domestic hot water schedule for each hour of the year\n :type dhw: list[float]\n :return pro: process electricity schedule for each hour of the year\n :type pro: list[float]\n\n \"\"\"\n\n occ = []\n el = []\n dhw = []\n pro = []\n\n if dhw_schedules[0].sum() != 0:\n dhw_weekday_max = dhw_schedules[0].sum() ** -1\n else: dhw_weekday_max = 0\n\n if dhw_schedules[1].sum() != 0:\n dhw_sat_max = dhw_schedules[1].sum() ** -1\n else: dhw_sat_max = 0\n\n if dhw_schedules[2].sum() != 0:\n dhw_sun_max = dhw_schedules[2].sum() ** -1\n else: dhw_sun_max = 0\n\n for date in dates:\n month_year = month_schedule[date.month - 1]\n hour_day = date.hour\n dayofweek = date.dayofweek\n if 0 <= dayofweek < 5: # weekday\n occ.append(occ_schedules[0][hour_day] * month_year)\n el.append(el_schedules[0][hour_day] * month_year)\n dhw.append(dhw_schedules[0][hour_day] * month_year * dhw_weekday_max) # normalized dhw demand flow rates\n pro.append(pro_schedules[0][hour_day] * month_year)\n elif dayofweek is 5: # saturday\n occ.append(occ_schedules[1][hour_day] * month_year)\n el.append(el_schedules[1][hour_day] * month_year)\n dhw.append(dhw_schedules[1][hour_day] * 
month_year * dhw_sat_max) # normalized dhw demand flow rates\n pro.append(pro_schedules[1][hour_day] * month_year)\n else: # sunday\n occ.append(occ_schedules[2][hour_day] * month_year)\n el.append(el_schedules[2][hour_day] * month_year)\n dhw.append(dhw_schedules[2][hour_day] * month_year * dhw_sun_max) # normalized dhw demand flow rates\n pro.append(pro_schedules[2][hour_day] * month_year)\n\n return occ, el, dhw, pro\n\n # get internal loads and indoor comfort from archetypes\n archetypes_internal_loads = pd.read_excel(locator.get_archetypes_properties(), 'INTERNAL_LOADS').set_index('Code')\n archetypes_indoor_comfort = pd.read_excel(locator.get_archetypes_properties(), 'INDOOR_COMFORT').set_index('Code')\n\n # create empty list of archetypal schedules and occupant densities\n schedules = []\n occ_densities = []\n\n # create empty lists for the values of each archetype's ventilation and internal loads\n Qs_Wm2 = []\n X_ghm2 = []\n Ea_Wm2 = []\n El_Wm2 = []\n Epro_Wm2 = []\n Ere_Wm2 = []\n Ed_Wm2 = []\n Vww_ldm2 = []\n Vw_ldm2 = []\n Ve_lsm2 = []\n Qhpro_Wm2 = []\n\n for use in list_uses:\n # read from archetypes_schedules and properties\n archetypes_schedules = pd.read_excel(locator.get_archetypes_schedules(), use).T\n\n # read lists of every daily profile\n occ_schedules, el_schedules, dhw_schedules, pro_schedules, month_schedule, area_per_occupant = read_schedules(\n use, archetypes_schedules)\n\n # get occupancy density per schedule in a list\n if area_per_occupant != 0:\n occ_densities.append(1 / area_per_occupant)\n else:\n occ_densities.append(area_per_occupant)\n\n # get internal loads per schedule in a list\n Ea_Wm2.append(archetypes_internal_loads['Ea_Wm2'][use])\n El_Wm2.append(archetypes_internal_loads['El_Wm2'][use])\n Epro_Wm2.append(archetypes_internal_loads['Epro_Wm2'][use])\n Ere_Wm2.append(archetypes_internal_loads['Ere_Wm2'][use])\n Ed_Wm2.append(archetypes_internal_loads['Ed_Wm2'][use])\n Qs_Wm2.append(archetypes_internal_loads['Qs_Wp'][use])\n X_ghm2.append(archetypes_internal_loads['X_ghp'][use])\n Vww_ldm2.append(archetypes_internal_loads['Vww_lpd'][use])\n Vw_ldm2.append(archetypes_internal_loads['Vw_lpd'][use])\n Ve_lsm2.append(archetypes_indoor_comfort['Ve_lps'][use])\n Qhpro_Wm2.append(archetypes_internal_loads['Qhpro_Wm2'][use])\n\n # get yearly schedules in a list\n schedule = get_yearly_vectors(dates, occ_schedules, el_schedules, dhw_schedules, pro_schedules, month_schedule)\n schedules.append(schedule)\n\n archetype_values = {'people': occ_densities, 'Qs': Qs_Wm2, 'X': X_ghm2, 'Ea': Ea_Wm2, 'El': El_Wm2,\n 'Epro': Epro_Wm2, 'Ere': Ere_Wm2, 'Ed': Ed_Wm2, 'Vww': Vww_ldm2,\n 'Vw': Vw_ldm2, 've': Ve_lsm2, 'Qhpro': Qhpro_Wm2}\n\n return schedules, archetype_values", "def handleEvents(self, events):\n self.virtual_time = events[0].timestamp\n now = self.virtual_time\n\n # handle events based on type\n for evt in events:\n logging.debug('\\t Handle %s' % str(evt))\n if evt.evt_type == BBEventType.Submitted:\n self.scheduler.insertToInputQ(evt.job)\n elif evt.evt_type == BBEventType.FinishIn:\n self.scheduler.insertToRunQ(evt.job)\n elif evt.evt_type == BBEventType.ReleaseInBB:\n self.scheduler.releaseBB(evt.job.demand.data_in)\n elif evt.evt_type == BBEventType.FinishRun:\n self.scheduler.insertToOutputQ(evt.job)\n elif evt.evt_type == BBEventType.ReleaseRunCN:\n self.scheduler.releaseCN(evt.job.demand.num_core)\n elif evt.evt_type == BBEventType.FinishOut:\n self.scheduler.insertToCompleteQ(evt.job)\n else:\n logging.warn('\\t Unable to handle event %s' % 
str(evt))\n jobs = self.scheduler.schedule(now)\n if jobs:\n new_events = self.generator.generateEvents(jobs)\n for evt in new_events:\n self.event_q.append(evt)", "def _set_time_bnds(in_dir, var):\n # This is a complicated expression, but necessary to keep local\n # variables below the limit, otherwise prospector complains.\n cubelist = iris.load(\n glob.glob(\n os.path.join(in_dir, var['file'].replace('c3s', 'c3s_regridded'))))\n\n # The purpose of the following loop is to remove any attributes\n # that differ between cubes (otherwise concatenation over time fails).\n # In addition, care is taken of the time coordinate, by adding the\n # time_coverage attributes as time_bnds to the time coordinate.\n for n_cube, _ in enumerate(cubelist):\n time_coverage_start = cubelist[n_cube].\\\n attributes.pop('time_coverage_start')\n time_coverage_end = cubelist[n_cube].\\\n attributes.pop('time_coverage_end')\n\n # Now put time_coverage_start/end as time_bnds\n # Convert time_coverage_xxxx to datetime\n bnd_a = datetime.strptime(time_coverage_start, \"%Y-%m-%dT%H:%M:%SZ\")\n bnd_b = datetime.strptime(time_coverage_end, \"%Y-%m-%dT%H:%M:%SZ\")\n\n # Put in shape for time_bnds\n time_bnds_datetime = [bnd_a, bnd_b]\n\n # Read dataset time unit and calendar from file\n dataset_time_unit = str(cubelist[n_cube].coord('time').units)\n dataset_time_calender = cubelist[n_cube].coord('time').units.calendar\n # Convert datetime\n time_bnds = cf_units.date2num(time_bnds_datetime, dataset_time_unit,\n dataset_time_calender)\n # Put them on the file\n cubelist[n_cube].coord('time').bounds = time_bnds\n\n return cubelist", "def _makespan(sched_mapping_list):\n start = reduce(min, [x.start_time for x in sched_mapping_list], 0.)\n end = reduce(max, [x.end_time for x in sched_mapping_list], 0.)\n return end - start", "def get_data(self):\n# epoch_from = 1301641200\n# epoch_to = epoch_from+60*60*24\n \"\"\"\n letting runs finish for 2 more hours\n ideally, want to make this a function of time from schedule plus some\n variation, like 1 hour just in case\n \"\"\" \n# epoch_to_adjusted = epoch_to + 7200\n conn = self.connect_to_mongo()\n db = conn.muni\n \n# print \"==== Collecting starting runs from %s to %s ====\"\\\n# % (str(time.ctime(epoch_from)), str(time.ctime(epoch_to)))\n \"\"\"\n > db.location.find({loc:{$within:{$center:[[37.80241, -122.4364],\n 0.01]}}})\n > db.location.find({loc:{$within:{$center:[[37.76048, -122.38895],\n 0.002]}}})\n \"\"\"\n bus_ids = db.location.find({'route':self.route_name}).distinct(\"bus_id\")\n for bus_id in bus_ids:\n c_start = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.start_lat, self.start_lon],\n self.start_prec]}}\n }).sort(\"cur_time\", DESCENDING)\n self.massage_start_data(c_start)\n \"\"\"\n TODO: the end point seems to be too nice to Muni, need to tighten\n the circle a little\n \"\"\"\n c_end = db.location.find({\"bus_id\":bus_id,\n \"loc\":{\"$within\":{\"$center\":[[self.end_lat, self.end_lon],\n self.end_prec]}}\n }).sort(\"cur_time\", ASCENDING)\n self.massage_end_data(c_end)\n if self.to_log:\n print self.start_bus_ids_to_times\n print self.end_bus_ids_to_times\n \n return self.start_bus_ids_to_times, self.end_bus_ids_to_times", "def get_itds(timestamps, ears, types, max_itd=800e-6, save_to_file=None, verbose=False, return_itd_indices=False):\n ears = ears.astype(np.bool)\n itds_to_return = np.zeros(timestamps.size, dtype=np.float32)\n itds_to_return.fill(-5. 
* max_itd)\n\n timestamps_dict = {}\n timestamp_indices_dict = {}\n for ear in np.unique(ears):\n timestamps_dict[ear] = {}\n timestamp_indices_dict[ear] = {}\n for type_of_event in np.unique(types):\n timestamps_dict[ear][type_of_event] = []\n timestamp_indices_dict[ear][type_of_event] = []\n\n for idx, (timestamp, ear, type_of_event) in enumerate(zip(timestamps, ears, types)):\n timestamps_dict[ear][type_of_event].append(timestamp)\n timestamp_indices_dict[ear][type_of_event].append(idx)\n\n if verbose:\n print('Initialized the timestamp lists.')\n\n bar = progressbar.ProgressBar() if verbose else lambda x: x\n\n for type_of_event in bar(np.unique(types)):\n timestamps_left = np.array(timestamps_dict[True][type_of_event])\n timestamp_indices_left = timestamp_indices_dict[True][type_of_event]\n timestamps_right = np.array(timestamps_dict[False][type_of_event])\n timestamp_indices_right = timestamp_indices_dict[False][type_of_event]\n\n for ts_right, ts_idx_right in zip(timestamps_right, timestamp_indices_right):\n matched_indices = np.where((timestamps_left >= ts_right - max_itd) &\n (timestamps_left < ts_right + max_itd))[0]\n if matched_indices.size > 0:\n matched_itds = ts_right - timestamps_left[matched_indices]\n min_itd = np.argmin(np.abs(matched_itds))\n itds_to_return[ts_idx_right] = matched_itds[min_itd]\n\n for ts_left, ts_idx_left in zip(timestamps_left, timestamp_indices_left):\n matched_indices = np.where((timestamps_right >= ts_left - max_itd) &\n (timestamps_right < ts_left + max_itd))[0]\n if matched_indices.size > 0:\n matched_itds = timestamps_right[matched_indices] - ts_left\n min_itd = np.argmin(np.abs(matched_itds))\n itds_to_return[ts_idx_left] = matched_itds[min_itd]\n\n itd_indices = np.where(itds_to_return > -4. * max_itd)[0]\n itds_to_return = itds_to_return[itd_indices]\n if save_to_file is not None:\n np.savez(save_to_file, timestamps=timestamps[itd_indices], ears=ears[itd_indices], types=types[itd_indices],\n itds=itds_to_return, itd_indices=itd_indices)\n\n if return_itd_indices:\n return itds_to_return, itd_indices\n\n return itds_to_return", "def select_windows(start, stop, num_windows,\n window_width=1, window_units=\"D\",\n sampling=1, sampling_units=\"T\",\n no_overlaps=True, verbose=True):\n\n # Create all sample candidates\n dt_range = pd.date_range(start, stop-pd.Timedelta(window_width),\n freq=\"%i%s\" % (sampling, sampling_units))\n\n # Sample candidate windows\n selected_windows = np.random.choice(dt_range, num_windows, replace=False)\n selected_windows = pd.DataFrame(selected_windows, columns=[\"start\"])\n\n # Calculate window end\n end_delta = (pd.Timedelta(window_width, unit=window_units)\n - pd.Timedelta(sampling,\n unit=\"m\" if sampling_units==\"T\" else sampling_units))\n selected_windows[\"end\"] = (selected_windows[\"start\"] + end_delta)\n\n # Filter overlaps\n if not no_overlaps:\n return selected_windows\n else:\n selected_windows = filter_overlaps(selected_windows,\n pd.Timedelta(window_width,\n unit=window_units))\n\n while selected_windows.shape[0]<num_windows:\n if verbose:\n print(\"Got %i windows...\" % selected_windows.shape[0])\n\n selected_windows = pd.concat([selected_windows,\n select_windows(start, stop, num_windows,\n window_width, window_units,\n sampling, sampling_units,\n no_overlaps=False)],\n ignore_index=True)\n selected_windows = filter_overlaps(selected_windows,\n pd.Timedelta(window_width,\n unit=window_units))\n return selected_windows.iloc[:num_windows]", "def generate_fire_time_series(self):\r\n 
self.fire_events =[]\r\n event = self.generate_fire_recurrence()\r\n end_event = event + 365.0\r\n self.fire_events.append([event, end_event])\r\n t = 0\r\n i = 0\r\n while t <= self.total_run_time:\r\n fire = self.generate_fire_recurrence()\r\n start_fire = self.fire_events[i][0] + (fire)\r\n end_fire = start_fire + (365.0) \r\n self.fire_events.append([start_fire, end_fire])\r\n t += end_fire\r\n i+=1", "def handle_outgoing_sub_events(sub_events: List[any]):\n events = [core.create_event(sub_event) for sub_event in sub_events]\n core.push_events(events)", "def split_events(event_data: dict, event_splitting_params: dict, n_jobs=-1) -> dict:\n # check that event_data is of good form.\n assert {'event_codes', 'event_times'} == set(event_data.keys())\n n_trial = len(event_data['event_codes'])\n assert n_trial == len(event_data['event_times'])\n assert n_trial >= 1\n\n # no memmaping, since trials are usually short.\n pool = Parallel(n_jobs=n_jobs, max_nbytes=None)\n split_result = pool(\n delayed(_split_events_per_trial)(t_idx, codes, times, event_splitting_params) for t_idx, (codes, times) in\n enumerate(zip(event_data['event_codes'],\n event_data['event_times'])))\n\n result = _assemble_result(split_result, n_trial)\n _check_output(result)\n\n return result", "def process_events(cat_data, n_run, cfg, sta_locs):\n import time\n import os\n import shutil\n import sys\n import logging\n from obspy import read\n from obspy.geodetics.base import gps2dist_azimuth\n import matplotlib.pyplot as plt\n\n if cfg.output.FORCE_RECALC is True:\n w = open(\"refined_events.dat\", \"w\")\n w.close()\n if cfg.plotting.DO_PLOT_1 is True or cfg.plotting.DO_PLOT_2 is True:\n fig = plt.figure(figsize=(18, 10))\n else:\n fig = []\n # Prepare directory\n if (os.path.exists(\"runs/run{:}\".format(n_run))\n and os.path.isdir(\"runs/run{:}\".format(n_run))):\n shutil.rmtree(\"runs/run{:}\".format(n_run))\n copytree(\"NLLOC_run\", \"runs/run{:}\".format(n_run))\n os.chdir(\"runs/run{:}\".format(n_run))\n for n_ev, ev in enumerate(cat_data):\n start = time.time()\n ev_id = ev.event_descriptions[0].text\n sys.stdout.flush()\n ev_dict = {}\n ev_dict[\"stations\"] = {}\n orig_lat, orig_lon = [ev.origins[0].latitude, ev.origins[0].longitude]\n logging.debug(\"startint logging\")\n st = read(\"../../{:}/{:}/MSEED/*.msd\".format(\n cfg.input.DIR_TO_EVENTDIRS, ev_id), format=\"MSEED\")\n print(n_run, ev_id)\n for n_tr, tr in enumerate(st):\n if st[n_tr].stats.sampling_rate > 40.0:\n try:\n st[n_tr].resample(40)\n except ZeroDivisionError:\n continue\n st1, st2, st_mag = [st.copy(), st.copy(), st.copy()]\n # Append distance to trace\n stations_data = sorted(set([tr.stats.station for tr in st\n if tr.stats.station not in\n cfg.sta_select.STA_BLACKLIST]))\n stations_dist = {sta_code: gps2dist_azimuth(\n sta_locs[sta_code][\"lat\"], sta_locs[sta_code][\"lon\"],\n orig_lat, orig_lon)[0] for sta_code in stations_data\n if gps2dist_azimuth(\n sta_locs[sta_code][\"lat\"], sta_locs[sta_code][\"lon\"],\n orig_lat, orig_lon)[0]/1000 <= cfg.sta_select.MAX_DIST}\n path_to_figs = \"../../{:}/{:}/figs\".format(\n cfg.input.DIR_TO_EVENTDIRS, ev_id)\n if not os.path.exists(path_to_figs):\n os.mkdir(path_to_figs)\n print(\"Doing first refinement\")\n sys.stdout.flush()\n if (\"R\" in cfg.picking.CMPS_REFINE_1[\"S\"] or\n \"T\" in cfg.picking.CMPS_REFINE_1[\"S\"]):\n rot = True\n else:\n rot = False\n evt_refine_1, rms, found = refine_events(\n st1, stations_dist, cfg.picking.CMPS_REFINE_1,\n cfg.picking.MAX_PICK_DIFF_REFINE1, ev,\n 
cfg.ploting.DO_PLOT_1, 1, fig, \"const\", path_to_figs, ev_dict,\n ev_id, cfg, rot\n )\n if found is False:\n continue\n print(\"RMS = \", rms)\n sys.stdout.flush()\n prev_rms = rms\n print(\"Doing second refinement\")\n sys.stdout.flush()\n if (\"R\" in cfg.picking.CMPS_REFINE_2[\"S\"] or\n \"T\" in cfg.picking.CMPS_REFINE_2[\"S\"]):\n rot = True\n else:\n rot = False\n evt_refine_2, rms, found = refine_events(\n st2, stations_dist, cfg.picking.CMPS_REFINE_2,\n cfg.picking.MAX_PICK_DIFF_REFINE2, evt_refine_1,\n cfg.plotting.DO_PLOT_2, 2, fig, \"dist\", path_to_figs, ev_dict,\n ev_id, rot\n )\n if found is False:\n continue\n print(\"RMS = \", rms)\n if rms > prev_rms * 1.25:\n print(\"RMS is significantly increasing (*25%) - skipping event\")\n continue\n prev_rms = rms\n evt_refine_2 = compute_magnitude(evt_refine_2, st_mag, cfg)\n write_evt(evt_refine_2, ev_id)\n end = time.time()\n print(\"Time taken for event: {:3.1f} mins\".format((end-start)/60))", "def get_events(start_date, end_date, source=utils.get_native_source, **kwargs):\n if not isinstance(source, games.models.Source):\n source = source()\n logger.info(\"getting events from source %s...\", source)\n if not source:\n return []\n # with open('sportmonks/response_texts/fixtures_{}-{}.txt'.format(start_date.strftime('%Y-%m-%d'),\n # end_date.strftime('%Y-%m-%d')), 'w') as outfile:\n # season is necessary so that the season object is extracted and used\n include = kwargs.get('include', '')\n include = ','.join([include, 'season']) if include else 'season'\n kwargs['include'] = include\n data, meta, status_code = sportmonks.fixtures.by_date_range(start_date=start_date, end_date=end_date, **kwargs)\n # json.dump(data, outfile, indent=4)\n if not data:\n return []\n pre_events = []\n try:\n num_fetched_objects = len(data)\n except:\n num_fetched_objects = None\n num_processed_objects = 0\n try:\n for obj in data:\n num_processed_objects += 1\n try:\n sid = obj.get('id', None)\n time = obj.get('time', dict())\n starting_at = time.get('starting_at', dict())\n event_datetime = get_date(starting_at, 'date_time')\n # custom_timezone = pytz.timezone('Europe/Athens')\n # event_datetime = event_datetime.astimezone(custom_timezone)\n home_team_sid = obj.get('localteam_id', None)\n away_team_sid = obj.get('visitorteam_id', None)\n competition_season_sid = obj.get('season_id', None)\n season_string = obj.get('season', {}).get('data', {}).get('name')\n stage_sid = obj.get('stage_id', None)\n round_sid = obj.get('round_id', None)\n competition_sid = obj.get('league_id', None)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n\n zak_season_name = games.models.Season.zakandify_season_string(season_string)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n if not season:\n logger.data_error('Could not extract season object from season string: %s', season_string)\n continue\n\n # todo sportmonks fix\n # if the event involves a problematic team it is not created in order to avoid future problems\n if is_in_problematic_teams(home_team_sid):\n home_team_sid = None\n if is_in_problematic_teams(away_team_sid):\n away_team_sid = None\n\n competition_seasons = games.models.CompetitionSeason.by_sid(competition_season_sid, source, season)\n try:\n competition_season = competition_seasons.first() # only one entity exists in the queryset\n except Exception as e:\n logger.warning('%s', e)\n competition_season = None\n\n home_team = games.models.Team.by_sid(home_team_sid, source)\n away_team = 
games.models.Team.by_sid(away_team_sid, source)\n pre_event = pre_models.PreEvent(source, sid, event_datetime, home_team, away_team, competition_season)\n pre_events.append(pre_event)\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.fixtures.by_date_range %s %s from source %s',\n e, start_date, end_date, source)\n logger.info(\"%s event objects were contained in the response\", num_fetched_objects)\n logger.info(\"%s event objects were processed\", num_processed_objects)\n logger.info(\"%s pre events were created\", len(pre_events))\n return pre_events", "def event_timex_analysis(event1, event2):\n tagged = tag(event1.text)\n base_time = event1.get_best_time()\n\n if base_time is not None:\n dt, trusted = base_time.to_datetime() # Get the datetime representation of the reference's best_time\n grounded_times = ground(tagged, dt) # Ground any timex tags to that time\n new_dates = [] # holds new dates constructed from the grounded datetimes\n\n for time in grounded_times:\n new_date = Date()\n if trusted['year']:\n new_date.year = time.year\n\n if trusted['month']:\n new_date.month = time.month\n\n if trusted['day']:\n new_date.day = time.day\n\n if trusted['hour']:\n new_date.hour = time.hour\n\n if trusted['minute']:\n new_date.minute = time.minute\n\n new_dates.append(new_date)\n\n if len(new_dates) == 0: # Nothing interesting found.\n return\n\n new_dates = sorted(new_dates, lambda x: x.precision(), reverse=True)\n best_date = new_dates[0]\n\n other_best_date = event2.get_best_time()\n if other_best_date is not None:\n if best_date.precision() > other_best_date.precision():\n event2.set_best_time(best_date)\n else:\n event2.set_best_time(best_date)", "def atwork_subtour_scheduling(\n tours,\n persons_merged,\n tdd_alts,\n tdd_subtour_spec,\n atwork_subtour_scheduling_settings,\n configs_dir,\n chunk_size,\n trace_hh_id):\n\n trace_label = 'atwork_subtour_scheduling'\n constants = config.get_model_constants(atwork_subtour_scheduling_settings)\n\n persons_merged = persons_merged.to_frame()\n\n tours = tours.to_frame()\n subtours = tours[tours.tour_category == 'subtour']\n\n logger.info(\"Running atwork_subtour_scheduling with %d tours\" % len(subtours))\n\n # parent_tours table with columns ['tour_id', 'tdd'] index = tour_id\n parent_tour_ids = subtours.parent_tour_id.astype(int).unique()\n parent_tours = pd.DataFrame({'tour_id': parent_tour_ids}, index=parent_tour_ids)\n parent_tours = parent_tours.merge(tours[['tdd']], left_index=True, right_index=True)\n\n \"\"\"\n parent_tours\n tour_id tdd\n 20973389 20973389 26\n 44612864 44612864 3\n 48954854 48954854 7\n \"\"\"\n\n tdd_choices = vectorize_subtour_scheduling(\n parent_tours,\n subtours,\n persons_merged,\n tdd_alts, tdd_subtour_spec,\n constants=constants,\n chunk_size=chunk_size,\n trace_label=trace_label)\n assign_in_place(subtours, tdd_choices)\n\n expressions.assign_columns(\n df=subtours,\n model_settings='annotate_tours',\n configs_dir=configs_dir,\n trace_label=trace_label)\n\n assign_in_place(tours, subtours)\n pipeline.replace_table(\"tours\", tours)\n\n tracing.dump_df(DUMP,\n tt.tour_map(parent_tours, subtours, tdd_alts, persons_id_col='parent_tour_id'),\n trace_label, 'tour_map')\n\n if trace_hh_id:\n tracing.trace_df(subtours,\n label=\"atwork_subtour_scheduling\",\n slicer='person_id',\n index_label='tour_id',\n columns=None,\n warn_if_empty=True)", "def filter_windows(sliding_windows_file, genes_file, output_file):\n\n\t# Read sliding windows file and create a list in the form\n\t# genes = 
[('gene1', 1000, 2000), ('gene2', 4000, 45000)]\n\tgenes = []\t\t# this could be a dictionary but I prefer not\n\tfor line in genes_file:\n\t\tline = line.strip()\n\n\t\tif line and not line.startswith('#'):\t\t# if line is not empty and not a comment\n#\t\tif line and re.match('\\d+', line):\n\t\t\tlogging.debug((\"line: %s\" %line))\n\t\t\tfields = line.split()\t\t# it is better to use the default splitting algorithm here.\n\t\t\t\t\t\t\t\t\t\t# read help(''.split)\t\n\n\t\t\tgene_name = fields[0]\n\t\t\tlogging.debug((\"fields: %s\" %fields))\n\t\t\tstart = int(fields[2])\n\t\t\tend = int(fields[3].strip())\t\t# remove \\n\\r, like chomp\n\t\t\tgenes.append((gene_name, start, end))\n\t\t\t\n#\tlogging.debug((\"genes :\", genes))\t\t# print the contents of genes, if level=loggin.DEBUG\n\n\t# read sliding windows file, and select windows that fall in genes\n\toutput = '#gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score\\n'\n\toutputlineskeleton = \"%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%s\\t%s\\t%s\\n\"\t# %(gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\n\tfor line in sliding_windows_file:\n\t\tline = line.strip()\t\t# remove trailing characters (like chomp)\n\t\tif line and not line.startswith('#'):\n\t\t\twindow_fields = line.split()\n\n#\t\t\tlogging.debug(window_fields)\n\t\t\twindow_start = int(window_fields[0])\n\t\t\twindow_middle = int(window_fields[2])\n\t\t\twindow_end = int(window_fields[1])\n#\t\t\tgene = window_fields[3]\n\t\t\tpopulation = window_fields[4]\n\t\t\tnumber = window_fields[5]\n\t\t\tscore = window_fields[6]\n\n\t\t\tfor gene in genes:\n\t\t\t\tgene_start = int(gene[1])\n\t\t\t\tgene_end = int(gene[2])\n\t\t\t\tgene_name = gene[0]\n\t\t\t\t# if window_start is comprised between gene_end and gene_start\n\t\t\t\tif gene_end > window_start >= gene_start:\n\t\t\t\t\tlogging.debug(\"This window starts inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\t\t\telif gene_end >= window_end > gene_start:\n\t\t\t\t\tlogging.debug(\"This window ends inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\n\tlogging.debug(output)\n\toutput_file.write(output)\n\toutput_file.seek(0)\n\treturn output_file", "def check_tracking_periods(tracking_start_times_unix_sec,\n tracking_end_times_unix_sec):\n\n error_checking.assert_is_integer_numpy_array(tracking_start_times_unix_sec)\n error_checking.assert_is_numpy_array(\n tracking_start_times_unix_sec, num_dimensions=1)\n\n num_tracking_periods = len(tracking_start_times_unix_sec)\n these_expected_dim = numpy.array([num_tracking_periods], dtype=int)\n\n error_checking.assert_is_integer_numpy_array(tracking_end_times_unix_sec)\n error_checking.assert_is_numpy_array(\n tracking_end_times_unix_sec, exact_dimensions=these_expected_dim)\n\n error_checking.assert_is_geq_numpy_array(\n tracking_end_times_unix_sec - tracking_start_times_unix_sec, 0)\n\n start_time_sort_indices = numpy.argsort(tracking_start_times_unix_sec)\n end_time_sort_indices = numpy.argsort(tracking_end_times_unix_sec)\n\n if not numpy.array_equal(start_time_sort_indices, end_time_sort_indices):\n 
tracking_start_time_strings = [\n time_conversion.unix_sec_to_string(t, LOG_MESSAGE_TIME_FORMAT)\n for t in tracking_start_times_unix_sec\n ]\n\n tracking_end_time_strings = [\n time_conversion.unix_sec_to_string(t, LOG_MESSAGE_TIME_FORMAT)\n for t in tracking_end_times_unix_sec\n ]\n\n print('\\n')\n for k in range(len(tracking_start_time_strings)):\n print('{0:d}th tracking period = {1:s} to {2:s}'.format(\n k + 1, tracking_start_time_strings[k],\n tracking_end_time_strings[k]\n ))\n\n print('\\n')\n raise ValueError(\n 'As shown above, start/end times of tracking periods are not sorted'\n ' in the same order.'\n )\n\n tracking_start_times_unix_sec = tracking_start_times_unix_sec[\n start_time_sort_indices\n ]\n tracking_end_times_unix_sec = tracking_end_times_unix_sec[\n start_time_sort_indices\n ]\n\n error_checking.assert_is_geq_numpy_array(\n tracking_end_times_unix_sec - tracking_start_times_unix_sec, 0\n )\n error_checking.assert_is_greater_numpy_array(\n tracking_start_times_unix_sec[1:] - tracking_end_times_unix_sec[:-1], 0\n )\n\n return tracking_start_times_unix_sec, tracking_end_times_unix_sec", "def gen_start_end_times(start_time=[6, 0, 0], end_time=[23, 0, 0]):\n\n now = datetime.now()\n year = now.year\n month = now.month\n day = now.day\n\n start_time = datetime(\n year, month, day, start_time[0], start_time[1], start_time[2], 0\n )\n\n end_time = datetime(year, month, day, end_time[0], end_time[1], end_time[2], 0)\n\n if end_time < now:\n end_time += timedelta(days=1)\n start_time += timedelta(days=1)\n\n return start_time, end_time", "def test_non_overlapping_events(sample_events, woodshop, caplog):\n caplog.set_level(logging.INFO)\n event1, event2 = sample_events.make_non_overlapping_events()\n woodshop.start_event(event1)\n woodshop.log_conflicts(event1.start_time)\n woodshop.end_event(event1)\n woodshop.log_conflicts(event1.end_time)\n woodshop.start_event(event2)\n woodshop.log_conflicts(event2.start_time)\n woodshop.end_event(event2)\n woodshop.log_conflicts(event2.end_time)\n assert caplog.text == \"\"", "def getTimeSegments(segments,bounds,radius,starttime,endtime,magrange,catalog,contributor):\n stime = starttime\n etime = endtime\n \n dt = etime - stime\n dtseconds = dt.days*86400 + dt.seconds\n #segment 1\n newstime = stime\n newetime = stime + timedelta(seconds=dtseconds/2)\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n #segment 2\n newstime = newetime\n newetime = etime\n nevents,maxevents = getEventCount(bounds=bounds,radius=radius,\n starttime=newstime,endtime=newetime,\n magrange=magrange,catalog=catalog,\n contributor=contributor)\n if nevents < maxevents:\n segments.append((newstime,newetime))\n else:\n segments = getTimeSegments(segments,bounds,radius,newstime,newetime,\n magrange,catalog,contributor)\n\n return segments", "def reformat_events(self, data, features):\n # Update alias\n if isinstance(features, list):\n [x.update(dict(alias=self.alias[(x[features[0]],\n x[features[1]])])) for x in data]\n else:\n [x.update(dict(alias=self.alias[x[features]])) for x in data]\n temp_data = list()\n # define ordering keys and columns\n if self.one_timestamp:\n columns = ['alias', 'duration', 'dur_act_norm']\n sort_key = 'end_timestamp'\n else:\n sort_key = 
'start_timestamp'\n columns = ['alias', 'processing_time',\n 'proc_act_norm', 'waiting_time', 'wait_act_norm']\n data = sorted(data, key=lambda x: (x['caseid'], x[sort_key]))\n for key, group in itertools.groupby(data, key=lambda x: x['caseid']):\n trace = list(group)\n temp_dict = dict()\n for col in columns:\n serie = [y[col] for y in trace]\n if col == 'alias':\n temp_dict = {**{'profile': serie}, **temp_dict}\n else:\n serie = [y[col] for y in trace]\n temp_dict = {**{col: serie}, **temp_dict}\n temp_dict = {**{'caseid': key, 'start_time': trace[0][sort_key],\n 'end_time': trace[-1][sort_key]},\n **temp_dict}\n temp_data.append(temp_dict)\n return sorted(temp_data, key=itemgetter('start_time'))", "def get_next_available_open_timeset(\n a_timestamp: str, list_of_timesets: list, debug_mode: bool = False\n) -> dict:\n\n results = {\"next_free_timeset\": None, \"reached_end_of_list\": True}\n\n sorted_list_of_timesets = sorted(list_of_timesets, key=lambda k: k[0])\n\n filtered_list_of_timesets = []\n for timeset in sorted_list_of_timesets:\n if datetime.fromisoformat(a_timestamp) <= datetime.fromisoformat(timeset[1]):\n filtered_list_of_timesets.append(timeset)\n\n # get rid of timesets that end before timestamp\n if filtered_list_of_timesets != sorted_list_of_timesets:\n print_time_data(\n \"Next available_timeset: filtering effect from:\",\n sorted_list_of_timesets,\n debug_mode,\n )\n print_time_data(\n \"Next available_timeset: filtering effect to:\",\n filtered_list_of_timesets,\n debug_mode,\n )\n\n # the last timeset triggers some actions. However if the last is also the first\n # i.e. list of 1 timeset, then its too early to set off the trigger\n index_of_last_timeset = (len(filtered_list_of_timesets) - 1) or 1\n\n temp_timestamp = a_timestamp\n\n for timeset_index, timeset in enumerate(filtered_list_of_timesets):\n if datetime.fromisoformat(timeset[0]) > datetime.fromisoformat(temp_timestamp):\n\n results[\"next_free_timeset\"] = [temp_timestamp, timeset[0]]\n if timeset_index != index_of_last_timeset:\n results[\"reached_end_of_list\"] = False\n\n print_time_data(\n \"Next available_timeset: Going to break: current timeset\",\n timeset,\n debug_mode,\n )\n print_time_data(\n \"Next available_timeset: Going to break: timestamp\",\n temp_timestamp,\n debug_mode,\n )\n print_time_data(\n \"Next available_timeset: Going to break: results\", results, debug_mode\n )\n break\n\n temp_timestamp = timeset[1]\n\n # Check if the found timeset has a startTime\n # inside another timeset\n if results[\"next_free_timeset\"]:\n temp_timeset = validate_update_timestamp(\n results[\"next_free_timeset\"], filtered_list_of_timesets, debug_mode\n )\n results[\"next_free_timeset\"] = temp_timeset\n\n print_time_data(\"Next available_timeset: Final results\", results, debug_mode)\n\n return results", "def get_timeline_events(self, req, start, stop, filters):", "def events_info(request):\n \n global input\n \n if request == 'event-based':\n client_neries = Client_neries()\n \n events = client_neries.getEvents(min_datetime=input['min_date'], \\\n max_datetime=input['max_date'], min_magnitude=input['min_mag'], \\\n max_magnitude=input['max_mag'], min_latitude=input['evlatmin'], \\\n max_latitude=input['evlatmax'], min_longitude=input['evlonmin'], \\\n max_longitude=input['evlonmax'], min_depth = input['min_depth'], \\\n max_depth=input['max_depth'], max_results=input['max_result'])\n \n for i in range(0, len(events)):\n events[i]['t1'] = events[i]['datetime'] - input['preset']\n events[i]['t2'] = 
events[i]['datetime'] + input['offset']\n \n elif request == 'continuous':\n m_date = UTCDateTime(input['min_date'])\n M_date = UTCDateTime(input['max_date'])\n \n t_cont = M_date - m_date\n \n events = []\n \n if t_cont > input['interval']:\n num_div = int(t_cont/input['interval'])\n t_res = t_cont - num_div*input['interval']\n \n for i in range(0, num_div):\n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + i*input['interval'], \\\n 't1': m_date + i*input['interval'],\\\n 't2': m_date + (i+1)*input['interval'] + 60.0,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n \n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i+1), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + (i+1)*input['interval'], \\\n 't1': m_date + (i+1)*input['interval'],\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n else:\n events.append({'author': 'NAN', 'event_id': 'continuous0', \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date, \\\n 't1': m_date,\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n\n return events", "def get_events_params(args: Dict[str, Any]) -> Dict[str, Any]:\n params: Dict[str, Any] = {'event_type': 'Ips Event'}\n arg_keys = args.keys()\n\n if 'duration' in arg_keys:\n params['duration'] = args.get('duration', '')\n\n if 'start_time' in arg_keys:\n start_time = args.get('start_time', '')\n date_time = dateparser.parse(start_time)\n if date_time:\n params['start_time'] = str(\n date_time.strftime(API_SUPPORT_DATE_FORMAT)\n )\n else:\n raise ValueError(\n MESSAGES['INVALID_TIME_VALIDATION'].format('start_time')\n )\n\n if 'end_time' in arg_keys:\n end_time = args.get('end_time', '')\n date_time = dateparser.parse(end_time)\n if date_time:\n params['end_time'] = str(\n date_time.strftime(API_SUPPORT_DATE_FORMAT)\n )\n else:\n raise ValueError(\n MESSAGES['INVALID_TIME_VALIDATION'].format('end_time')\n )\n\n if 'mvx_correlated_only' in arg_keys:\n mvx_correlated_only = args.get('mvx_correlated_only', '').lower()\n try:\n mvx_correlated_only = argToBoolean(mvx_correlated_only)\n params['mvx_correlated_only'] = mvx_correlated_only\n except ValueError:\n raise ValueError(\n MESSAGES['INVALID_BOOLEAN_VALUE_ERROR'].format(\n 'mvx_correlated_only'\n )\n )\n\n return params", "def test_window():\n # Generate observations with random times\n timeline = random_timed_observation_timeline()\n\n # Defaults to one hour\n for window in timeline.windows():\n # Gotta be a tuple, though we don't know the length\n assert isinstance(window, tuple)\n assert len(window) > 0\n\n # Check the types\n for o in window:\n assert isinstance(o, Observation)\n\n # Double check that Observations in the window are sorted (for fun)\n for o1, o2 in zip(window, window[1:]):\n assert o1 < o2\n\n # Make sure each member is within an hour of the first.\n # We know they're sorted, so just check first and last.\n assert (window[0].time + timedelta(hours=1)) > window[-1].time", "def get_events_for_specific_hours(start,end):\n\tresults = session.query(\"event_name\",\"date\",\"start_time\",\"end_time\").\\\n\tfrom_statement(\"select event_name,date,start_time,end_time from event where date=curdate() and 
\\\n\t\tstart_time >= :starttime and end_time <= :endtime\").\\\n\tparams(starttime = start, endtime = end).all()\n\tif(len(results) > 0):\n\t\tret_dict = {}\n\t\tevents = []\n\n\t\tfor event_tuple in results:\n\t\t\ttemp = {}\n\t\t\ttemp['event_name'] = event_tuple[0]\n\t\t\ttemp['start_date'] = str(event_tuple[1])\n\t\t\ttemp['start_time'] = str(event_tuple[2])\n\t\t\ttemp['end_time'] = str(event_tuple[3])\n\t\t\tevents.append(temp)\n\n\t\tret_dict['events'] = events\n\t\treturn jsonify(ret_dict)\n\telse:\n\t\treturn \"{'events':'no results returned'}\"", "def get_itds_v3(timestamps, ears, types, max_itd=800e-6, save_to_file=None, verbose=False, return_itd_indices=False):\n ears = ears.astype(np.bool)\n itds_to_return = np.zeros(timestamps.size, dtype=np.float32)\n itds_to_return.fill(-5. * max_itd)\n\n timestamps_dict = {}\n timestamp_indices_dict = {}\n for ear in np.unique(ears):\n timestamps_dict[ear] = {}\n timestamp_indices_dict[ear] = {}\n for type_of_event in np.unique(types):\n timestamps_dict[ear][type_of_event] = []\n timestamp_indices_dict[ear][type_of_event] = []\n\n for idx, (timestamp, ear, type_of_event) in enumerate(zip(timestamps, ears, types)):\n timestamps_dict[ear][type_of_event].append(timestamp)\n timestamp_indices_dict[ear][type_of_event].append(idx)\n\n if verbose:\n print('Initialized the timestamp lists.')\n\n bar = progressbar.ProgressBar() if verbose else lambda x: x\n\n max_num_events = 5\n\n for type_of_event in bar(np.unique(types)):\n timestamps_left = np.array(timestamps_dict[True][type_of_event])\n timestamp_indices_left = timestamp_indices_dict[True][type_of_event]\n timestamps_right = np.array(timestamps_dict[False][type_of_event])\n timestamp_indices_right = timestamp_indices_dict[False][type_of_event]\n\n num_right_events = timestamps_right.shape[0]\n\n for event_idx, (ts_right, ts_idx_right) in enumerate(zip(timestamps_right, timestamp_indices_right)):\n matched_indices = np.where((timestamps_left >= ts_right - max_itd) &\n (timestamps_left < ts_right + max_itd))[0]\n if matched_indices.size > 0:\n matched_itds = ts_right - timestamps_left[matched_indices]\n min_itd_idx_local = np.argmin(np.abs(matched_itds))\n min_itd = matched_itds[min_itd_idx_local]\n # absolute index of the itd pair event\n min_itd_ts_left = ts_right - min_itd\n # now check that the itd pair for the itd pair event is the current event\n if event_idx < max_num_events:\n min_itd_ts_right = timestamps_right[0: event_idx + max_num_events + 1]\n alt_min_itd_idx = np.argmin(np.abs(min_itd_ts_left - min_itd_ts_right))\n if alt_min_itd_idx == event_idx:\n itds_to_return[ts_idx_right] = min_itd\n else:\n min_itd_ts_right = timestamps_right[event_idx - max_num_events: event_idx + max_num_events + 1]\n alt_min_itd_idx = np.argmin(np.abs(min_itd_ts_left - min_itd_ts_right))\n if alt_min_itd_idx == max_num_events:\n itds_to_return[ts_idx_right] = min_itd\n if min_itd_ts_right[0] > min_itd_ts_left - max_itd or min_itd_ts_right[-1] < min_itd_ts_left + max_itd:\n print('[WARNING] The max_num_events is not enough, please check.')\n sys.stdout.flush()\n\n itd_indices = np.where(itds_to_return > -4. 
* max_itd)[0]\n itds_to_return = itds_to_return[itd_indices]\n if save_to_file is not None:\n np.savez(save_to_file, timestamps=timestamps[itd_indices], ears=ears[itd_indices], types=types[itd_indices],\n itds=itds_to_return, itd_indices=itd_indices)\n\n if return_itd_indices:\n return itds_to_return, itd_indices\n\n return itds_to_return", "def process_events(self):\n self.data = pd.read_csv(\n self.args[\"data\"], delimiter=self.DataDelimiter, index_col=False)\n df_events, df_data = self.events, self.data\n\n self.log(\"[.] Timed and special events separation...\")\n\n indexer = df_events.Time.str.contains(\"\\d\\d:\\d\\d\", regex=True, na=False)\n timed_events = df_events[indexer]\n several_days_events = df_events[~indexer]\n\n self.log(\"[.] Events and data linking...\")\n\n linked_with_data = self._link_data_and_events(\n timed_events, df_data, self.args[\"limit\"], self.args[\"timezone\"])\n\n if linked_with_data.empty:\n err = \"Error occurred: linked dataframe is empty\"\n self.log(err, severe=True)\n raise ValueError(err)\n\n self.log(\"[.] Processed dataframes saving...\")\n\n # linked_with_data.to_csv(\"linked.csv\", index=False)\n # several_days_events.to_csv(\"special_events.csv\", index=False)\n folder = self.args[\"output_folder\"]\n linked_with_data.to_csv(os.path.join(folder, \"linked.csv\"), index=False)\n several_days_events.to_csv(\n os.path.join(folder, \"special_events.csv\"), index=False)", "def findguidingstop(starttime, event_list):\n for r in event_list:\n if r[0]==6 and r[1]+datetime.timedelta(seconds=0)>starttime: return r[1]\n return None", "def to_epochs(event_data, resolution=3, val_col=\"tag\"):\n if val_col != \"tag\":\n logger.warning(\"Non-tag val_col should generally not be used. Epoch conversions is last step.\"\n \"Are you sure you wish to continue?\")\n # First indentify allocations. Windows should be split and\n # merged using the individual classes. 
Long-form data\n # holder with contain index start end dur tutples\n holder = [[]]\n # i = 1\n for num, (index, row) in enumerate(event_data.iterrows()):\n if num % 100 == 0:\n print(f\"Handling row {num}\")\n s, e = row.start, row.end\n while s < e:\n # print(s, e)\n # i += 1\n # if i > 1000:\n # break\n cur = holder[-1]\n # Determine how much time is in the current row\n if not cur:\n cur_time = 0\n else:\n cur_time = sum(i[-1] for i in cur)\n # Amount of time that needs to be added\n to_find = resolution - cur_time\n # print(s, e, cur_time, to_find)\n # If the instance is full,\n if to_find <= 10e-8:\n # print(\"APPEND\")\n holder.append([])\n continue\n\n # Add that much time to the current holder from the current event if possible\n overlap = min(e, s + timedelta(seconds=to_find))\n # print(overlap)\n cur.append((index, s, overlap, (overlap - s).total_seconds()))\n s = overlap\n # Then reconstruct a dataframe from such (Breakdown-> compile)\n print(f\"Reconstructing holder of length {len(holder)}\")\n rows = []\n for interval_num, intervals in enumerate(holder):\n # window here is thus the start to end\n window = (intervals[0][1], intervals[-1][2])\n # Calculate the highest tag for the window\n activpal_event_dist = defaultdict(int)\n tag_dist = defaultdict(int)\n # Also the total steps for the epoch in the meantime\n epoch_steps = 0\n for index, start, end, duration in intervals:\n row = event_data.loc[index, :]\n activpal_event_dist[row[\"activpal_event\"]] += duration\n tag_dist[row[val_col]] += duration\n # Partial steps\n # print(row)\n epoch_steps += row[\"steps\"] * (end-start).total_seconds() / row[\"duration\"]\n\n event = max(activpal_event_dist.items(), key=lambda x: x[1])[0]\n tag = max(tag_dist.items(), key=lambda x: x[1])[0]\n logger.debug(f\"tag_dist{tag_dist}, selected_tag={tag}\")\n epoch_start = intervals[0][1]\n epoch_end = intervals[-1][2]\n epoch_duration = (epoch_end - epoch_start).total_seconds()\n data = {\n val_col: tag,\n \"activpal_event\": event,\n \"steps\": epoch_steps,\n \"cadence\": epoch_steps / epoch_duration * 60,\n \"start\": epoch_start,\n \"end\": epoch_end,\n \"duration\": epoch_duration,\n }\n rows.append(data)\n\n return pd.DataFrame(rows)", "def process_groups(groups, logs):\n events = list()\n \n for group in groups:\n tag = group[2]\n target = group[3]\n msg_type = group[-1].lower()\n if tag == ACTIVITY_TAG or tag == DIALOG_TAG or tag == VIEW_TAG:\n\n if group[0] == group[1]:\n if msg_type == 'touchevent':\n events.append(touch_processor.create_touch_event(msg_type, target, logs[group[0]], group[0], tag))\n elif msg_type == 'keyevent':\n events.append(key_processor.create_key_event(msg_type, target, logs[group[0]], group[0]))\n continue\n\n # Activity & Dialig\n if msg_type == 'touchevent':\n event_logs = clear_logs(logs[group[0]:group[1]+1], [ACTIVITY_TAG, DIALOG_TAG, VIEW_TAG])\n ev = touch_processor.parse_touch_event(msg_type, target, event_logs, group[0], tag)\n elif msg_type == 'keyevent':\n event_logs = clear_logs(logs[group[0]:group[1]+1], [ACTIVITY_TAG, DIALOG_TAG])\n ev = key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == POPUPWINDOW_TAG:\n # PopupWindow, process view onTouchEvent\n events.append(popup_window_processor.create_popup_window_event(msg_type, target, logs[group[0]], group[0]))\n view_groups = group[4]\n view_events = process_groups(view_groups, logs)\n if len(view_events) != 0:\n events += view_events\n 
events.append(popup_window_processor.create_popup_window_event(msg_type, target, logs[group[1]], group[1]))\n elif tag == EDITABLE_INPUT_CONNECTION_TAG:\n # Input Event\n nested_groups = group[4]\n # Process nested events\n nested_events = process_groups(nested_groups, logs)\n evs = input_processor.parse_input_event(msg_type, target, logs[group[0]:group[1]+1], nested_events, group[0])\n events += evs\n elif tag == TEXT_VIEW_KEY_TAG:\n # Keyboard event caught by TextView onKeyPreIme\n event_logs = clear_logs(logs[group[0]:group[1]+1], [TEXT_VIEW_KEY_TAG])\n ev = key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n ev.intent = event.KeyEvent.HIDE_KEYBOARD_INTENT\n events.append(ev)\n elif tag == WEBVIEW_KEY_EVENT_TAG:\n # WebView KeyBoard event\n event_logs = logs[group[0]:group[1]+1]\n ev = wv_key_processor.parse_key_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == WEBVIEW_CLIENT_TAG:\n # WebView page loaded\n event_logs = logs[group[0]:group[1]+1]\n ev = wv_page_loaded_processor.parse_page_loaded(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == SENSOR_LISTENER_TAG:\n # Low level sensor\n event_logs = logs[group[0]:group[1]+1]\n ev = low_level_sensor_processor.parse_low_level_sensor_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n elif tag == LOCATION_MANAGER_TAG or tag == LOCATION_LISTENER_TAG:\n event_logs = logs[group[0]:group[1]+1]\n ev = location_processor.parse_location_sensor_event(msg_type, target, event_logs, group[0])\n events.append(ev)\n\n return events", "def retrieve_multiple_time_series(self,run='latest',run_data=None,criteria={},timestep='daily',name_fn=name_element_variable):\n if timestep==\"daily\":\n suffix = \"\"\n else:\n suffix = \"/aggregated/%s\"%timestep\n\n if run_data is None:\n run_data = self.retrieve_run(run)\n\n retrieved={}\n def name_column(result):\n col_name = name_fn(result)\n if col_name in retrieved:\n i = 1\n alt_col_name = '%s %d'%(col_name,i)\n while alt_col_name in retrieved:\n i += 1\n alt_col_name = '%s %d'%(col_name,i)\n col_name = alt_col_name\n return col_name\n\n units_store = {}\n for result in run_data['Results']:\n if self.result_matches_criteria(result,criteria):\n d = self.retrieve_json(result['TimeSeriesUrl']+suffix)\n result.update(d)\n col_name = name_column(result)\n# raise Exception(\"Duplicate column name: %s\"%col_name)\n if 'Events' in d:\n retrieved[col_name] = d['Events']\n units_store[col_name] = result['Units']\n else:\n all_ts = d['TimeSeries']\n for ts in all_ts:\n col_name = name_column(ts)\n units_store[col_name] = ts['Units']\n\n vals = ts['Values']\n s = self.parse_veneer_date(ts['StartDate'])\n e = self.parse_veneer_date(ts['EndDate'])\n if ts['TimeStep']=='Daily':\n f='D'\n elif ts['TimeStep']=='Monthly':\n f='M'\n elif ts['TimeStep']=='Annual':\n f='A'\n dates = pd.date_range(s,e,freq=f)\n retrieved[col_name] = [{'Date':d,'Value':v} for d,v in zip(dates,vals)]\n # Multi Time Series!\n\n result = self._create_timeseries_dataframe(retrieved)\n for k,u in units_store.items():\n result[k].units = u\n\n return result", "def _extract_event_data(self, all_runs):\n\n ret_data = []\n\n for run in all_runs:\n run_data, event_data, *_ = run\n run_array = np.zeros((len(run_data['Global']), 16))\n\n previous_time = 0\n\n reg_iters = {\n \"A\": iter(np.array(run_data['RegionA'])),\n \"B\": iter(np.array(run_data['RegionB'])),\n \"C\": iter(np.array(run_data['RegionC']))\n }\n regs = {\n \"A\": next(reg_iters[\"A\"]),\n \"B\": 
next(reg_iters[\"B\"]),\n \"C\": next(reg_iters[\"C\"])\n }\n\n for i, event_dat in enumerate(event_data):\n run_array[i, 0:4] = regs[\"A\"][[1, 2, 5, 6]]\n run_array[i, 4:8] = regs[\"B\"][[1, 2, 5, 6]]\n run_array[i, 8:12] = regs[\"C\"][[1, 2, 5, 6]]\n\n time, event_id, _, new_state = event_dat\n region = self.nodes[event_id].region\n\n regs[region] = next(reg_iters[region])\n\n run_array[i, 12] = time - previous_time\n previous_time = time\n\n if new_state == State.INF_H or new_state == State.INF_L:\n run_array[i, 13] = 1\n if new_state == State.INF_H:\n run_array[i, 14] = Risk.HIGH\n elif new_state == State.INF_L:\n run_array[i, 14] = Risk.LOW\n\n if region == \"A\":\n run_array[i, 15] = 0\n elif region == \"B\":\n run_array[i, 15] = 1\n elif region == \"C\":\n run_array[i, 15] = 2\n\n run_array[-1, 0:4] = regs[\"A\"][[1, 2, 5, 6]]\n run_array[-1, 4:8] = regs[\"B\"][[1, 2, 5, 6]]\n run_array[-1, 8:12] = regs[\"C\"][[1, 2, 5, 6]]\n run_array[-1, 12] = self.sim_params['end_time'] - previous_time\n run_array[-1, 13] = 0\n\n\n ret_data.append(run_array)\n\n return ret_data", "def build_schedule(solution, new_examiners, new_students):\n examiners = deepcopy(new_examiners)\n students = deepcopy(new_students)\n\n def student_is_available(target_student, target_time, target_duration):\n \"\"\"\n Checks whether a student is available at a given time for a certain duration\n :param target_student: the student\n :param target_time: the time at which the student should be available\n :param target_duration: the duration during which the student should be available\n :return:\n \"\"\"\n for exam, exam_time in target_student.items():\n if exam_time == -1:\n continue\n\n if target_time <= exam_time < target_time + target_duration + delay:\n return False\n elif exam_time <= target_time < exam_time + durations[exam] + delay:\n return False\n\n return True\n\n def examiner_is_available(target_examiner, target_time):\n \"\"\"\n Checks whether an examiner is available at a given time for his exam's duration\n :param target_examiner: the examiner\n :param target_time: the duration during which the examiner should be available\n :return:\n \"\"\"\n examiner_number, examiner_exams = target_examiner[\"Number\"], target_examiner[\"Exams\"]\n\n for _, exam_time in examiner_exams.items():\n if exam_time == -1:\n continue\n\n if target_time <= exam_time < target_time + durations[examiner_number]:\n return False\n elif exam_time <= target_time < exam_time + durations[examiner_number]:\n return False\n\n return True\n\n examiners_order, *students_orders = solution\n\n for j in examiners_order:\n all_set = False\n t = 0\n while not all_set:\n all_set = [examiners[j][\"Exams\"][i] != -1 for i in range(student_count)] == [True] * student_count\n placed = False\n for student in students_orders[j]:\n if examiners[j][\"Exams\"][student] != -1:\n continue\n\n if student_is_available(students[student], t, durations[j]):\n if examiner_is_available(examiners[j], t):\n placed = True\n students[student][j] = t\n examiners[j][\"Exams\"][student] = t\n break\n\n if not placed:\n t += 1\n else:\n t += durations[j]\n\n return examiners, students", "def condense_meeting_times_2(arr):\n\n # sort the meeting times by start time (this will be O(lg(n)), at least)\n # without sorting by start times, the random order will make this O(n^2)\n arr.sort()\n\n # make a list to store output\n output = [arr[0]]\n\n # iterate over all the time blocks and check for merges\n for time_block in arr[1:]:\n # get the times to compare against from the 
latest block in output\n first_start, first_stop = output[-1]\n # unpack the current time block being assessed for overlap\n second_start, second_stop = time_block\n # if the current time block overlaps with most recent, condense the two\n # by updating the entire tuple in the output list with latest time\n if second_start <= first_stop:\n output[-1] = (first_start, max(first_stop, second_stop))\n # else, there was no overlap. Add current to output and continue loop\n else:\n output.append((second_start, second_stop))\n\n return output", "def test_split_ranges(self):\n start = datetime.utcnow() - pd.Timedelta(\"5H\")\n end = datetime.utcnow() + pd.Timedelta(\"5min\")\n delta = pd.Timedelta(\"1H\")\n\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n self.assertEqual(len(ranges), 5)\n self.assertEqual(ranges[0][0], start)\n self.assertEqual(ranges[-1][1], end)\n\n st_times = [start_tm[0] for start_tm in ranges]\n for end_time in (end_tm[1] for end_tm in ranges):\n self.assertNotIn(end_time, st_times)\n\n end = end + pd.Timedelta(\"20min\")\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n self.assertEqual(len(ranges), 5)\n self.assertEqual(ranges[0][0], start)\n self.assertEqual(ranges[-1][1], end)", "def kml_timespan(t1,t2,event_time=None,tz=None,tscale=1):\n\n t1 = t1*tscale # Time converted to seconds\n t2 = t2*tscale\n\n import time\n # to adjust time from UTC to time in event locale.\n if event_time == None:\n # Use local time.\n starttime = time.mktime(time.localtime()) # seconds UTC\n tz_offset = time.timezone/3600.0 # in seconds\n else:\n ev = tuple(event_time) + (0,0,0) # Extend to 9 tuple; no DST\n # mktime returns time in seconds + timezone offset, i.e. seconds UTC\n # Subtract out the timezone offset here, since it will get added back\n # in when we do gmtime(starttime + ...) below.\n starttime = time.mktime(ev) - time.timezone\n if tz is None:\n print(\"===> Time zone offset not defined; assuming zero offset. \" \\\n \"Set plotdata.kml_tz_offset to define an offset (in hours) from \"\\\n \"UTC (positive west of UTC; negative east of UTC)\")\n tz = 0\n\n tz_offset = tz\n\n if (tz_offset == None):\n tzstr = \"Z\" # no offset; could also just set to \"+00:00\"\n else:\n # Google Earth will show time slider time in local time, where\n # local + offset = UTC.\n tz_offset = tz_offset*3600. 
# Offset in seconds\n tz = time.gmtime(abs(tz_offset))\n if (tz_offset > 0):\n tzstr = time.strftime(\"+%H:%M\",tz) # Time to UTC\n else:\n tzstr = time.strftime(\"-%H:%M\",tz)\n\n # Get time strings for start and end of time span\n gbegin = time.gmtime(starttime + t1)\n timestrbegin = \"%s%s\" % (time.strftime(\"%Y-%m-%dT%H:%M:%S\", gbegin),tzstr)\n\n gend = time.gmtime(starttime + t2)\n timestrend = \"%s%s\" % (time.strftime(\"%Y-%m-%dT%H:%M:%S\", gend),tzstr)\n\n return timestrbegin,timestrend", "def condense_meeting_times(arr):\n\n # make a list to store output\n output = []\n\n # sort the meeting times by start time (this will be O(n), at least)\n # without sorting by start times, the random order will make this O(n^2)\n arr.sort()\n\n # iterate over all the time blocks and check for merges\n for time_block in arr[:-1]:\n # unpack the first two meeting time\n first_start, first_stop = time_block\n # unpack the next time block into start and stop times\n second_start, second_stop = arr[arr.index(time_block) + 1]\n # if there is overlap, condense them\n if second_start <= first_stop:\n # the second one has the later stop\n if second_stop > first_stop:\n output.append((first_start, second_stop))\n # else, the first one had the later stop\n output.append((first_start, first_stop))\n # else, there wasn't overlap\n # append first meeting to output, and continue for loop\n output.append((first_start, first_stop))\n\n return output", "def test_overlapping_events(sample_events, woodshop, caplog):\n caplog.set_level(logging.INFO)\n event1, event2 = sample_events.make_overlapping_events()\n overlap_events(event1, event2, woodshop, woodshop, [woodshop])\n assert len(caplog.messages) == 1\n message = caplog.messages[0]\n assert \"Schedule conflict: place='Woodshop'\" in message\n expected_conflict_times = \"Conflict(start_time='{}', end_time='{}',\".format(\n event2.start_time, event1.end_time)\n assert expected_conflict_times in message\n assert event1.meetup_id in message\n assert event2.meetup_id in message", "def create_global_time_series(self, times, job_class_regex=None):\n\n global_time_series = {}\n bin_width = times[1] - times[0]\n found = 0\n tmin_epochs = self.tmin_epochs\n\n for ts_name in signal_types:\n\n _values = [0]*len(times)\n _elapsed = [0]*len(times)\n _processes = [0]*len(times)\n\n for jj, job in enumerate(self.jobs):\n\n if job.label:\n if job.is_in_class(job_class_regex):\n\n found += 1\n\n if job.time_series.get(ts_name):\n\n # job_ts_timestamps includes the t_0 of each interval\n job_ts_timestamps = [0] + job.time_series[ts_name][\"times\"]\n t_start = job.t_start\n\n for tt in range(1, len(job_ts_timestamps)):\n\n first = int(math.floor((job_ts_timestamps[tt-1] + t_start-tmin_epochs) / bin_width))\n last = int(math.ceil((job_ts_timestamps[tt] + t_start-tmin_epochs) / bin_width))\n\n # make sure that last is always > first\n last = last if last > first else first + 1\n\n # make sure that n_span_bin is >= 1\n n_span_bin = max(1, last-first)\n\n # counter to get the value corresponding to the time interval\n value_count = tt-1\n\n _values = add_value_to_sublist(_values, first, last, job.time_series[ts_name][\"values\"][value_count]/float(n_span_bin))\n _elapsed = add_value_to_sublist(_elapsed, first, last, job.time_series[ts_name][\"elapsed\"][value_count]/float(n_span_bin))\n _processes = add_value_to_sublist(_processes, first, last, 1)\n\n global_time_series[ts_name] = {\"times\": times,\n \"values\": _values,\n \"elapsed\": _elapsed,\n \"processes\": _processes}\n\n 
return found, global_time_series", "def get_pd_schedule(schedId, ts_in=None, ts_out=None, debug=False): # ts_... => Timestamps ...\n\n sch_beg = sch_end = 0\n\n if args.debug:\n print \"\\nEntering function with: \\nts_in: %s\\nts_out: %s\\n\" % (ts_in, ts_out) # debug\n print \"1-type(ts_in): \", ; print type(ts_in) # debug\n print \"1-type(ts_out): \", ; print type(ts_out) # debug\n\n ############################################################################\n # Use current time if timestamp range not specified ...\n ############################################################################\n now1 = dateutil.parser.parse(commands.getoutput(\"date +%Y-%m-%dT%H:%M:%S%:z\"))\n if args.debug:\n print \"Now1: %s\" % now1\n #print \"Now1: %s\" % now1.strftime(\"%Y-%m-%dT%H:%M:%S%z\") # debug\n\n ### ts_in is null or == '0' or == 'now' ...\n if not ts_in or ts_in.lower() == 'now' or ts_in == '0':\n ts_in = now1\n if args.debug: print \"\\n2-Timestamps=='now' or 0 or null, using current time:\\nts_in: %s\\n\" % (ts_in) # debug\n else: ts_in = dateutil.parser.parse(ts_in)\n\n ### ts_out is null or == '0' or 'now' ...\n if not ts_out or ts_out.lower() == 'now' or ts_out == '0':\n ts_out = now1\n if args.debug: print \"\\n2-Timestamps=='now' or 0 or null, using current time:\\nts_out: %s\\n\" % (ts_out) # debug\n else: ts_out = dateutil.parser.parse(ts_out)\n\n if args.debug:\n print \"2-type(ts_in): \", ; print type(ts_in) # debug\n print \"2-type(ts_out): \", ; print type(ts_out) # debug\n\n ############################################################################\n ### Build the PagerDuty API URL with specific schedule start/stop datetime (include whole shift) ...\n ############################################################################\n url = \"https://my_company.pagerduty.com/api/v1/schedules/%s/entries?since=%s&until=%s&overflow=true\" % (\n schedId,\n ts_in.strftime('%Y-%m-%dT%H:%M:%S%z'),\n ts_out.strftime('%Y-%m-%dT%H:%M:%S%z'))\n if args.debug:\n print \"URL: %s\" % url # debug\n\n ############################################################################\n ### Query the pager Duty service (upto 5 times if necessary) ...\n ############################################################################\n for x in xrange(1,5):\n try:\n if args.debug:\n print \"Trying PagerDuty url. 
Attempt no: %s\" % x # debug\n r = requests.get(url, auth=('pagerdutyapiuser@my_company.com', 'my_company12345678')) # my_company12345678 => PD-Token\n break\n except Exception, e:\n alogger.warning( \"Warning - PagerDuty API connection problem (attempt: %s of 5): %s %s\" % (x, Exception, e) )\n try:\n send_email(ses, email, e)\n except Exception1, ex1:\n print \"Failed to send mail to %s: %s %s\" % (email, Exception1, ex1)\n else: ### Bail out if too stubborn ...\n alogger.error( \"Error - Cannot recover from PagerDuty API connection problem (after trying %s/5): %s %s\" % (x, Exception, e) )\n sys.exit(1)\n\n content = json.loads(r.content)\n\n if args.debug:\n print json.dumps(content, sort_keys=True, separators=(', ', ': '), indent=4) # debug\n #print json.dumps(json.load(f1), sort_keys=True, separators=(', ', ': '), indent=4) # debug\n #-OR-\n #s = json.dumps(json.loads(r.content), sort_keys=True, separators=(', ', ' : '), indent=4)\n #print '\\n'.join([l.rstrip() for l in s.splitlines()])\n\n if content.has_key(\"error\"):\n send_email(ses, email,\n \"<dl><dt><b>Error:</b></dt><dd>Couldn't connect to PagerDuty: %s.</dd>\\\n <dt><b>Result:</b></dt><dd>Phone_Ctlr was NOT switched over.</dd>\\\n <dt><b>Troubleshooting:</b></dt><dd>Review connectivity to PagerDuty URL:<br />%s</dd></dl>\" % (str(content.get(\"error\").get(\"message\")), url)\n )\n\n #send_email(ses, email,\n # \"Couldn't connect to PagerDuty: %s.\" % str(content.get(\"error\").get(\"message\")),\n # \"Result: Phone_Ctlr was NOT switched over.\",\n # \"Troubleshooting: Review connectivity to PagerDuty URL:<br />%s\" % url\n #)\n\n sys.exit(2)\n\n ############################################################################\n ### If 'total entries' >1: this means it is time to switch Phone_Ctlr ...\n ############################################################################\n if content.get('total') > 1:\n if args.debug:\n print \"Time to change Phone_Ctlr setup\"\n else:\n if args.debug:\n print \"Wait to change Phone_Ctlr setup\"\n\n who1 = who2 = None\n cnt=0\n\n for entry in content.get('entries'): # From URL\n if args.debug:\n print \"\\nEntry \" + str(cnt) # debug\n print \"Name: %-20s (Id: %8s) -- Start: %s -> End: %s\" % (entry.get('user').get('name'), # debug\n entry.get('user').get('id'), # debug\n entry.get('start'), # debug\n entry.get('end')) # debug\n if cnt == 0: who1 = entry.get('user').get('name')\n who2 = who1\n if cnt == 1: who2 = entry.get('user').get('name')\n cnt += 1\n\n sch_beg = str(dateutil.parser.parse(entry.get('start')))\n sch_end = str(dateutil.parser.parse(entry.get('end')))\n\n return (who1, who2, sch_beg, sch_end)", "def create_time_slices(weeks, lookback, horizon, gap,\n step_size, holdout_window, num_steps):\n\n n = len(weeks)\n min_week = min(weeks)\n holdout_gap = horizon + gap - 1 # gap between train and holdout set\n holdout_size = horizon + holdout_window - 1\n step_space = (num_steps - 1) * step_size\n\n training_window = n - lookback - holdout_gap - holdout_size - step_space\n\n if training_window <= 0:\n err_msg = \"negative window size using specified parameters\"\n logging.error(err_msg)\n raise Exception(err_msg)\n\n def create_time_slice(step=0):\n base = min_week + lookback + step\n time_slice = (\n [base + x for x in range(training_window)],\n [base + x + holdout_gap + training_window\n for x in range(holdout_window)]\n )\n return time_slice\n\n output = [create_time_slice(x*step_size) for x in range(0, num_steps)]\n\n return output", "def __init__(self, start_time, 
end_time, events, **other_fields):\n self._start_time = start_time\n self._end_time = end_time\n self._events = events", "def increment_time(self, **kwargs):\n \n #Pull all optional keyword arguements\n if 'timerange' in kwargs:\n timerange = kwargs.pop('timerange')\n else:\n timerange = 7\n \n if 'display' in kwargs:\n displayflag = kwargs.pop('display')\n else:\n displayflag = 1\n \n if 'auto' in kwargs:\n autoflag = kwargs.pop('auto')\n else:\n autoflag = 0\n \n if 'triggered' in kwargs:\n triggered_rules = kwargs.pop('triggered')\n else:\n triggered_rules = []\n \n #Run simulation one day at a time until specified end point is reached\n count = range(0,timerange)\n for i in count:\n \n \n #Increment one day if at least one infected person remains. If not, end the simulation\n if self.SD_Map.IPop.value() > 1:\n time = self.timeSeries[-1]\n self.timeSeries.append(time+1)\n self.SD_Map.update_all(self.timestep(), len(self.timeSeries)-2)\n else:\n print('Done!')\n \n #Update the time display\n self.timev.set(self.timeSeries[-1])\n \n #Add any triggered rules to the rule log display\n if triggered_rules != []:\n day_text = self.translate('Day')+' ' + str(self.timeSeries[-1]) \n rule_text = '; ' + self.translate('Rules') + ': ' + str(triggered_rules)[1:-1]\n log_text = day_text + rule_text\n self.list_info_boxes['Log'].insert(tk.END, log_text)\n \n #If appropriate, update all of the graphs\n if displayflag == 1:\n if self.arrangment == ['Map', 'Graph']:\n index = 2\n invertflag = 1\n else:\n index = 0\n invertflag = 0\n \n #Select all of the graphs\n canvaslist = []\n for entrylist in self.graph_canvas_list:\n for entry in entrylist:\n canvaslist.append(entry)\n\n #For each graph, delete it and replace it with an update graph\n for canvas in canvaslist:\n if index < 2:\n col = 0\n inputindex = index\n self.figures[index].clear()\n plt.close(self.figures[index])\n else:\n col = 1\n inputindex = index - 2\n if invertflag:\n self.figures[inputindex].clear()\n plt.close(self.figures[inputindex])\n else:\n self.figures[index].clear()\n plt.close(self.figures[index])\n \n #Make new graph\n framename = canvas.get_tk_widget().master\n canvas.get_tk_widget().destroy()\n graph = self.translate(self.graph_setting_list[col][inputindex].get(),\n input_language=self.language,\n output_language='english')\n canvas,fig = self.make_graph(framename, graph,\n gridpos = inputindex*2+1)\n self.graph_canvas_list[col][inputindex]=canvas\n \n #Update figures list\n if invertflag:\n self.figures[inputindex] = fig\n else:\n self.figures[index] = fig\n index += 1", "def summarize(self, data, order=11, verbose=False):\n self.intervals = np.diff(self.timebase[self.onsets]) # event intervals\n i_decay_pts = int(2*self.taus[1]/self.dt) # decay window time (points)\n self.peaks = []\n self.smpkindex = []\n self.smoothed_peaks = []\n self.amplitudes = []\n self.Qtotal = []\n self.averaged = False # set flags in case of no events found\n self.individual_events = False\n self.fitted = False\n self.fitted_tau1 = np.nan\n self.fitted_tau2 = np.nan\n self.Amplitude = np.nan\n self.avg_fiterr = np.nan\n ndata = len(data)\n avgwin = 5 # int(1.0/self.dt) # 5 point moving average window for peak detection\n# print('dt: ', self.dt)\n mwin = int((0.050)/self.dt)\n# print('mwin: ', mwin)\n #order = int(0.0004/self.dt)\n # print('onsets: ', self.onsets)\n if self.sign > 0:\n nparg = np.greater\n else:\n nparg = np.less\n if len(self.onsets) > 0: # original events\n# print('no: ', len(self.onsets))\n acceptlist = []\n for j in 
range(len(data[self.onsets])):\n if self.sign > 0 and self.eventstartthr is not None:\n if self.data[self.onsets[j]] < self.eventstartthr:\n continue\n if self.sign < 0 and self.eventstartthr is not None:\n if self.data[self.onsets[j]] > -self.eventstartthr:\n continue\n svwinlen = data[self.onsets[j]:(self.onsets[j]+mwin)].shape[0]\n if svwinlen > 11:\n svn = 11\n else:\n svn = svwinlen\n if svn % 2 == 0: # if even, decrease by 1 point to meet ood requirement for savgol_filter\n svn -=1\n \n if svn > 3: # go ahead and filter\n p = scipy.signal.argrelextrema(scipy.signal.savgol_filter(data[self.onsets[j]:(self.onsets[j]+mwin)], svn, 2), nparg, order=order)[0]\n else: # skip filtering\n p = scipy.signal.argrelextrema(data[self.onsets[j]:(self.onsets[j]+mwin)], nparg, order=order)[0]\n if len(p) > 0:\n self.peaks.extend([int(p[0]+self.onsets[j])])\n amp = self.sign*(self.data[self.peaks[-1]] - data[self.onsets[j]])\n\n self.amplitudes.extend([amp])\n i_end = i_decay_pts + self.onsets[j] # distance from peak to end\n i_end = min(ndata, i_end) # keep within the array limits\n if j < len(self.onsets)-1:\n if i_end > self.onsets[j+1]:\n i_end = self.onsets[j+1]-1 # only go to next event start\n move_avg, n = moving_average(data[self.onsets[j]:i_end], n=min(avgwin, len(data[self.onsets[j]:i_end])))\n if self.sign > 0:\n pk = np.argmax(move_avg) # find peak of smoothed data\n else:\n pk = np.argmin(move_avg)\n self.smoothed_peaks.extend([move_avg[pk]]) # smoothed peak\n self.smpkindex.extend([self.onsets[j]+pk])\n acceptlist.append(j)\n if len(acceptlist) < len(self.onsets):\n if verbose:\n print('Trimmed %d events' % (len(self.onsets)-len(acceptlist)))\n self.onsets = self.onsets[acceptlist] # trim to only the accepted values\n # print(self.onsets)\n self.avgevent, self.avgeventtb, self.allevents = self.average_events(self.onsets) \n if self.averaged:\n self.fit_average_event(self.avgeventtb, self.avgevent, debug=False)\n \n else:\n if verbose:\n print('No events found')\n return", "def find_appointment_times(person_1_cal, person_2_cal, availability_bounds):\n # Helper function to translate string times into decimal\n def ttd(time):\n hour, minute = time.split(\":\")\n if minute == \"30\":\n return float(hour) + 0.5\n else:\n return float(hour)\n \n\n # Helper function to translate decimal times to string\n def dtt(dec):\n hour = math.floor(dec)\n minute = str(dec).split(\".\")[1]\n if minute == \"5\":\n return f\"{hour}:30\"\n else:\n return f\"{hour}:00\"\n\n\n # Translate both calendars to decimal\n cal_1_dec = [[ttd(appt[0]), ttd(appt[1])] for appt in person_1_cal]\n cal_2_dec = [[ttd(appt[0]), ttd(appt[1])] for appt in person_2_cal]\n\n # Find all possible 30 minute appointments\n \n # Then remove appoin\n \n # Helper to check if potential appointment would conflict with current appointments\n # Returns True if no conflicts\n def does_not_conflict(start_time, end_time):\n # Check first calendar\n for appt in cal_1_dec:\n appt_start, appt_end = appt\n # Does the potential appointment start during another appt\n if appt_start <= start_time and start_time < appt_end:\n return False\n # Does the potential appointment end during another appt\n if appt_start < end_time and end_time <= appt_end:\n return False\n # Check second calendar\n for appt in cal_2_dec:\n appt_start, appt_end = appt\n # Does the potential appointment start during another appt\n if appt_start <= start_time and start_time < appt_end:\n return False\n # Does the potential appointment end during another appt\n if appt_start < 
end_time and end_time <= appt_end:\n return False\n # If we make it all the way through, no conflicts return True\n print(f\"No conflict with {[dtt(start_time), dtt(end_time)]}\")\n return True\n\n # Destructure availability_bounds\n earliest_availability, latest_availability = availability_bounds\n latest_availability = ttd(latest_availability)\n\n # Initialize potential_start_time to be be first bound\n potential_start_time = ttd(earliest_availability)\n # Initialize valid_appts to keep track of appointments that work for both people\n valid_appts = []\n\n # Loop with end condition when potential_start_time is after latest availability\n while potential_start_time <= latest_availability - 1:\n print(f\"Potential start time: {potential_start_time}\")\n # Set potential_end_time to be an hour after potential_start_time\n potential_end_time = potential_start_time + 1\n\n # Check to see if there are any conflicts with the two calendars\n if does_not_conflict(potential_start_time, potential_end_time):\n valid_appts.append([dtt(potential_start_time), dtt(potential_end_time)])\n\n # Add 30 minutes to potential_start_time\n potential_start_time += 0.5\n \n return valid_appts", "def findontarget(starttime, event_list):\n for r in event_list:\n if r[0]==18 and r[1]>starttime: return r[1]\n return None", "def generate_fire_time_series(self):\n\n self.fire_events =[]\n event = self.generate_fire_recurrence()\n end_event = event + 365.0\n self.fire_events.append([event, end_event])\n t = 0\n i = 0\n while t <= self.total_run_time:\n fire = self.generate_fire_recurrence()\n start_fire = self.fire_events[i][0] + (fire)\n end_fire = start_fire + (365.0)\n self.fire_events.append([start_fire, end_fire])\n t += end_fire\n i+=1", "def get_itds_v2(timestamps, ears, types, max_itd=800e-6, save_to_file=None, verbose=False, return_attributes=False):\n ears = ears.astype(np.bool)\n itds_to_return, timestamps_to_return, ears_to_return, types_to_return = [], [], [], []\n\n timestamps_dict = {}\n timestamp_indices_dict = {}\n for ear in np.unique(ears):\n timestamps_dict[ear] = {}\n timestamp_indices_dict[ear] = {}\n for type_of_event in np.unique(types):\n timestamps_dict[ear][type_of_event] = []\n timestamp_indices_dict[ear][type_of_event] = []\n\n for idx, (timestamp, ear, type_of_event) in enumerate(zip(timestamps, ears, types)):\n timestamps_dict[ear][type_of_event].append(timestamp)\n timestamp_indices_dict[ear][type_of_event].append(idx)\n\n if verbose:\n print('Initialized the timestamp lists.')\n\n bar = progressbar.ProgressBar() if verbose else lambda x: x\n\n for type_of_event in bar(np.unique(types)):\n timestamps_left = np.array(timestamps_dict[True][type_of_event])\n timestamp_indices_left = timestamp_indices_dict[True][type_of_event]\n timestamps_right = np.array(timestamps_dict[False][type_of_event])\n timestamp_indices_right = timestamp_indices_dict[False][type_of_event]\n\n for ts_right, ts_idx_right in zip(timestamps_right, timestamp_indices_right):\n matched_indices = np.where((timestamps_left >= ts_right - max_itd) &\n (timestamps_left < ts_right + max_itd))[0]\n for matched_index in matched_indices:\n matched_itd = ts_right - timestamps_left[matched_index]\n itds_to_return.append(matched_itd)\n timestamps_to_return.append(ts_right)\n ears_to_return.append(False)\n types_to_return.append(type_of_event)\n\n for ts_left, ts_idx_left in zip(timestamps_left, timestamp_indices_left):\n matched_indices = np.where((timestamps_right >= ts_left - max_itd) &\n (timestamps_right < ts_left + max_itd))[0]\n 
for matched_index in matched_indices:\n matched_itd = timestamps_right[matched_index] - ts_left\n itds_to_return.append(matched_itd)\n timestamps_to_return.append(ts_left)\n ears_to_return.append(True)\n types_to_return.append(type_of_event)\n\n indices = np.argsort(timestamps_to_return)\n timestamps_to_return = np.array(timestamps_to_return, dtype=np.float32)[indices]\n itds_to_return = np.array(itds_to_return, dtype=np.float32)[indices]\n types_to_return = np.array(types_to_return, dtype=np.int16)[indices]\n ears_to_return = np.array(ears_to_return, dtype=np.int8)[indices]\n\n if save_to_file is not None:\n np.savez(save_to_file, timestamps=timestamps_to_return, ears=ears_to_return,\n types=types_to_return, itds=itds_to_return)\n\n if return_attributes:\n return itds_to_return, timestamps_to_return, ears_to_return, types_to_return\n\n return itds_to_return", "def getDaySummaryVectors(db_manager, sql_type, timespan, agg_list='max'):\n\n # Get our interpolation dictionary for the query\n interDict = {'start' : weeutil.weeutil.startOfDay(timespan.start),\n 'stop' : timespan.stop,\n 'table_name' : 'archive_day_%s' % sql_type}\n # Setup up a list of lists for our vectors\n _vec = [list() for x in range(len(agg_list))]\n # Initialise each list in the list of lists\n for agg in agg_list:\n _vec[agg_list.index(agg)] = list()\n # Setup up our time vector list\n _time_vec = list()\n # Initialise a dictionary for our results\n _return = {}\n # Get the unit system in use\n _row = db_manager.getSql(\"SELECT usUnits FROM %s LIMIT 1;\" % db_manager.table_name)\n std_unit_system = _row[0] if _row is not None else None\n # Get a cursor object for our query\n _cursor=db_manager.connection.cursor()\n try:\n # Put together our SQL query string\n sql_str = \"SELECT * FROM %(table_name)s WHERE dateTime >= %(start)s AND dateTime < %(stop)s\" % interDict\n # Loop through each record our query returns\n for _rec in _cursor.execute(sql_str):\n # Loop through each aggregate we have been asked for\n for agg in agg_list:\n # Calculate the aggregate\n if agg == 'min':\n _result = _rec[1]\n elif agg == 'max':\n _result = _rec[3]\n elif agg == 'sum':\n _result = _rec[5]\n elif agg == 'gustdir':\n _result = _rec[7]\n elif agg == 'mintime':\n _result = int(_rec[2]) if _rec[2] else None\n elif agg == 'maxtime':\n _result = int(_rec[4]) if _rec[4] else None\n elif agg == 'count':\n _result = int(_rec[6]) if _rec[6] else None\n elif agg == 'avg' :\n _result = _rec[5]/_rec[6] if (_rec[5] and _rec[6]) else None\n elif agg == 'rms' :\n _result = math.sqrt(_rec[10]/_rec[11]) if (_rec[10] and _rec[11]) else None\n elif agg == 'vecavg' :\n _result = math.sqrt((_rec[8]**2 + _rec[9]**2) / _rec[6]**2) if (_rec[6] and _rec[8] and _rec[9]) else None\n elif agg == 'vecdir' :\n if _rec[8] == 0.0 and _rec[9] == 0.0:\n _result = None\n elif _rec[8] and _rec[9]:\n deg = 90.0 - math.degrees(math.atan2(_rec[9], _rec[8]))\n _result = deg if deg >= 0.0 else deg + 360.0\n else:\n _result = None\n # If we have not found it then return None\n else:\n _result = None\n # Add the aggregate to our vector\n _vec[agg_list.index(agg)].append(_result)\n # Add the time to our time vector\n _time_vec.append(_rec[0])\n finally:\n # Close our cursor\n _cursor.close()\n # Get unit type and group for time\n (_time_type, _time_group) = weewx.units.getStandardUnitType(std_unit_system, 'dateTime')\n # Loop through each aggregate we were asked for getting unit and group and producing a ValueTuple\n # and adding to our result dictionary\n for agg in agg_list:\n 
(t,g) = weewx.units.getStandardUnitType(std_unit_system, sql_type, agg)\n _return[agg]=ValueTuple(_vec[agg_list.index(agg)], t, g)\n # Return our time vector and dictionary of aggregate vectors\n return (ValueTuple(_time_vec, _time_type, _time_group), _return)", "def extract_tt_by_periods(ttri, periods, start_time, end_time, filters):\n logger = getLogger(__name__)\n # sess = conn.get_session()\n das = {}\n all_wz_features = {}\n all_wz_laneconfigs = {}\n\n # collecting daily data\n for prd in periods:\n logger.debug('>>>> retrieving data for %s' % prd.get_date_string())\n year = prd.start_date.year\n sdate = prd.start_date\n edate = prd.end_date\n if year not in das:\n da_tt = tt.TravelTimeDataAccess(year)\n da_tt_wz = tt_workzone.TTWorkZoneDataAccess(year)\n da_tt_wz_feature = wz_feature.WZFeatureDataAccess()\n da_tt_wz_lncfg = wz_laneconfig.WZLaneConfigDataAccess()\n da_tt_weather = tt_weather.TTWeatherDataAccess(year)\n da_tt_snowmgmt = tt_snowmgmt.TTSnowManagementDataAccess(year)\n da_tt_incident = tt_incident.TTIncidentDataAccess(year)\n da_tt_specialevent = tt_specialevent.TTSpecialeventDataAccess(year)\n das[year] = (\n da_tt, da_tt_wz, da_tt_wz_feature, da_tt_wz_lncfg, da_tt_weather, da_tt_snowmgmt, da_tt_incident,\n da_tt_specialevent)\n\n (da_tt, da_tt_wz, da_tt_wz_feature, da_tt_wz_lncfg, da_tt_weather, da_tt_snowmgmt, da_tt_incident,\n da_tt_specialevent) = das[year]\n\n # traveltimes = da_tt.list_by_period(ttri.id, self.prd)\n weathers = da_tt_weather.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.WeatherInfo] \"\"\"\n workzones = da_tt_wz.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.WorkZoneInfo] \"\"\"\n incidents = da_tt_incident.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.IncidentInfo] \"\"\"\n snowmgmts = da_tt_snowmgmt.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.SnowManagementInfo] \"\"\"\n specialevents = da_tt_specialevent.list(ttri.id, sdate, edate, as_model=True)\n \"\"\":type: list[pyticas_tetres.ttrms_types.SpecialEventInfo] \"\"\"\n traveltimes = da_tt.list_by_period(ttri.id, prd)\n \"\"\":type: list[pyticas_tetres.ttrms_types.TravelTimeInfo] \"\"\"\n\n if not any(weathers):\n logger.debug('>>>> end of retrieving data for %s (no weather data)' % prd.get_date_string())\n continue\n\n extras = {\n 'weathers': {_tt.id: [] for _tt in traveltimes},\n 'workzones': {_tt.id: [] for _tt in traveltimes},\n 'incidents': {_tt.id: [] for _tt in traveltimes},\n 'specialevents': {_tt.id: [] for _tt in traveltimes},\n 'snowmgmts': {_tt.id: [] for _tt in traveltimes},\n }\n \"\"\":type: dict[str, dict[int, list]]\"\"\"\n\n _put_to_bucket(ttri, weathers, extras['weathers'], da_tt_weather, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, workzones, extras['workzones'], da_tt_wz, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, incidents, extras['incidents'], da_tt_incident, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, snowmgmts, extras['snowmgmts'], da_tt_snowmgmt, year, all_wz_features, all_wz_laneconfigs, das)\n _put_to_bucket(ttri, specialevents, extras['specialevents'], da_tt_specialevent, year, all_wz_features, all_wz_laneconfigs, das)\n\n for tti in traveltimes:\n _tt_weathers = extras['weathers'][tti.id]\n extdata = ExtData(tti,\n _tt_weathers[0] if _tt_weathers else None,\n extras['incidents'][tti.id],\n 
extras['workzones'][tti.id],\n extras['specialevents'][tti.id],\n extras['snowmgmts'][tti.id])\n\n if start_time <= tti.str2datetime(tti.time).time() <= end_time:\n for ef in filters:\n try:\n ef.check(extdata)\n except Exception as ex:\n tb.traceback(ex)\n logger.debug('>>>> end of retrieving data for %s (error occured 1)' % prd.get_date_string())\n continue\n else:\n for ef in filters:\n try:\n ef.check_outofrange(extdata)\n except Exception as ex:\n tb.traceback(ex)\n logger.debug('>>>> end of retrieving data for %s (error occured 2)' % prd.get_date_string())\n continue\n\n del extras\n logger.debug('>>>> end of retrieving data for %s' % prd.get_date_string())\n\n # sess.close()", "def get_times(ts_full, ts_system, len_state, sys_position, sys_length):\n ts = list(ts_full) + list(ts_system)\n subsystems = [[0, len_state]] * len(ts_full) + \\\n [[sys_position, sys_position + sys_length]] * len(ts_system)\n return ts, subsystems", "def operation_on_events(path_to_data_dir, list_of_test_id, operation, num_of_proc=1):\n\ttest_id = [\"test%s\"%i for i in list_of_test_id]\n\tpool = Pool(processes = num_of_proc)\n\tpath_to_final_selected_events = path_to_data_dir + \"final_selected_events.json\"\n\tif os.path.exists(path_to_final_selected_events):\n\t\tfinal_selected_events = json.load(open(path_to_final_selected_events,\"r\"))\n\t\tfinal_interested_events = []\n\t\tfor event in final_selected_events:\n\t\t\tif event[0] in test_id:\n\t\t\t\tfinal_interested_events.append(event)\n\telse:\n\t\tfinal_interested_events = []\n\t\tfor test in list_of_test_id:\n\t\t\tpath_to_curr_test = data_dir_to_test_dir(path_to_data_dir, test)\n\t\t\tpath_to_test_result = path_to_curr_test +\"/results\"\n\t\t\tpath_to_event_list = path_to_test_result + \"/selected_events.json\"\n\t\t\tif os.path.exists(path_to_event_list):\n\t\t\t\tevent_list = json.load(open(path_to_event_list,\"r\"))\n\t\t\t\tfor value in event_list.values():\n\t\t\t\t\tevent = [\"test%s\"%test,[value[0],value[1],value[2]]]\n\t\t\t\t\tfinal_interested_events.append(event)\n\t\t\telse:\n\t\t\t\tprint \"skip current test:\", \"test%s\"%test, \"there is no selected events\"\n\t\n\t# if function operation has no return value, it will return a list of Nones\n\tresult_list = pool.map(operation,final_interested_events)\n\treturn result_list", "def test_month_starts_and_ends(self):\n # @REVIEWED\n # @todo Optimize by combine start and end tests.\n\n startCnt = 0\n endCnt = 0\n\n def test_starts(timeColName, dataType):\n global startCnt\n self.logger.log('testing {},{}'.format(timeColName, dataType))\n\n # Take every other value from the unzipped pairs.\n starts = [x for x in itertools.islice(\n zip(*self.aggregator.monthStartsAndEnds(timeColName, dataType)),\n 0, None, 2)]\n startCnt = len(starts)\n\n # Test on the flattened start values.\n self.assertLessEqual(len(filter(\n lambda x: x.time() != datetime.strptime('00:00:00',\n '%H:%M:%S').time(),\n list(itertools.chain.from_iterable(starts)))), 1)\n\n def test_ends(timeColName, dataType):\n global endCnt\n self.logger.log('testing {},{}'.format(timeColName, dataType))\n\n # Take every other value from the unzipped pairs.\n ends = [x for x in itertools.islice(\n zip(*self.aggregator.monthStartsAndEnds(timeColName, dataType)),\n 1, None, 2)]\n endCnt = len(ends)\n\n # Test on the flattened end values.\n self.assertLessEqual(len(filter(\n lambda x: x.time() != self.aggregator.incrementEndpoint(\n datetime.strptime('23:59:59', '%H:%M:%S')).time(),\n list(itertools.chain.from_iterable(ends)))), 1)\n\n for 
myType in ['weather', 'egauge', 'circuit', 'irradiance']:\n if myType == 'egauge':\n test_starts('datetime', myType)\n test_ends('datetime', myType)\n else:\n test_starts('timestamp', myType)\n test_ends('timestamp', myType)\n self.assertEquals(startCnt, endCnt)" ]
[ "0.65961754", "0.6502333", "0.6414507", "0.6089023", "0.5852518", "0.5768563", "0.57619506", "0.57517695", "0.5713987", "0.56655604", "0.56637067", "0.56566614", "0.56286615", "0.55703324", "0.5542513", "0.5491681", "0.5479628", "0.54761803", "0.5450979", "0.54409343", "0.5434103", "0.54248923", "0.5419411", "0.5416304", "0.5399855", "0.5391567", "0.53671277", "0.5356125", "0.53477895", "0.5322029", "0.5309454", "0.529649", "0.5295564", "0.52900994", "0.5280213", "0.5276428", "0.52735144", "0.5248861", "0.522611", "0.52027327", "0.5191958", "0.51915675", "0.51695555", "0.51671314", "0.51578486", "0.51559883", "0.51448613", "0.51438785", "0.5143217", "0.51428837", "0.51390684", "0.5138457", "0.5125276", "0.51226944", "0.5114979", "0.51132494", "0.51076883", "0.5102988", "0.5097462", "0.5090601", "0.50715184", "0.5070099", "0.5066827", "0.5065077", "0.50586426", "0.5053961", "0.50487995", "0.5048145", "0.5042314", "0.5038282", "0.5020537", "0.50058687", "0.50044966", "0.50012326", "0.5000392", "0.49985453", "0.49879038", "0.49846783", "0.49770096", "0.49764457", "0.49743742", "0.49733773", "0.49602646", "0.49592006", "0.4957377", "0.495635", "0.49542972", "0.495278", "0.49462414", "0.49449164", "0.4942607", "0.49404472", "0.49381533", "0.4936983", "0.4936653", "0.4933695", "0.49336547", "0.49335232", "0.49309042", "0.49280918" ]
0.8048312
0
Returning the sync mode
def get_sync_mode(): return sync_mode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def synchronize_system_mode(self):\n\n return self._synchronize_system_mode", "def sync(self):\n return self._sync", "def isSync(self):\n return False", "def getMode(self):\n with self.lock:\n mode = self.mode\n return mode", "def syncheck(self) :\n\t\ttry :\n\t\t\treturn self._syncheck\n\t\texcept Exception as e:\n\t\t\traise e", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def lock_mode(self) -> str:\n return pulumi.get(self, \"lock_mode\")", "def lock_mode(self) -> str:\n return pulumi.get(self, \"lock_mode\")", "def _get_ldp_in_sync(self):\n return self.__ldp_in_sync", "def get_mode(self):\r\n return self.mode", "def supported_modes(self):\n return [OFF, SYNC, CHARGE]", "def getmode(self):\n return self.mode", "def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)", "def CaptureMode(self):\n if self.force_auto_sync:\n self.get('CaptureMode')\n return self._CaptureMode", "def getSyncObj(self):\n \n return self.sync_obj", "def getSyncInfo (self, connection) :\r\n \r\n if self.state != 'valid' :\r\n return False\r\n \r\n if self.sync_target :\r\n return False\r\n \r\n self.state = 'recv_sync'\r\n self.sync_target = connection\r\n self.do_sync_get()\r\n \r\n return True", "def getMode(self):\n return self._mode", "def mode(self):\n return self._vdma.writechannel.mode", "def last_on_mode(self):\n return self._last_on_mode", "def fsync(var, wrapper, message):\n sync_modes(var)", "def get_mode(self):\r\n return self._api.get_mode()", "def mode(self):\r\n return self._mode", "def mode(self):\r\n return self._mode", "def mode(self):\r\n return self._mode", "def getAddressInSync(self):\n return self._addrInSyncMode", "def auto_mode(self):\n return self._auto_mode", "def mode(self):\n return self.__mode", "def DualMode(self) -> bool:", "def mode(self):\n return self._mode", "def mode(self):\n return self._mode", "def mode(self):\n return self._mode", "def sync_time(self):\n return self.get_sync_time()", "def SynchronizeFlags(self):\n pass", "def SyncClockMaster(self):\n if self.force_auto_sync:\n self.get('SyncClockMaster')\n return self._SyncClockMaster", "def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)", "def mode(self):\n return self._vdma.readchannel.mode", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def mode(self):\n\n return self._mode", "def mode_remote(self):\n self.send(\"!MR\")\n # time.sleep(2.0)\n # No feedback, so query to verify set\n got = self.get_mode()\n assert got == \"R\", got", "def getSyncFor 
(self, conn) :\r\n for pw, _conn in self.clients :\r\n if _conn and _conn.getSyncInfo(conn) :\r\n self.ongoing_sync_count += 1\r\n return True\r\n \r\n return False", "def operation_mode(self):\n return self._operation_mode", "def IsSynchronized(self) -> bool:", "def mode(self) -> Mode:\n return self._mode", "def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down", "def sync() -> None:", "def common_mode(self):\n return self._common_mode", "def common_mode(self):\n return self._common_mode", "def mode(self):\n return self._mode_func", "def strictMode(self):\n return self._strictMode", "def get_config_sync_status(self):\n \n try:\n device_group = self.connection.Management.DeviceGroup.get_list()\n print self.connection.Management.DeviceGroup.get_sync_status([device_group])\n \n except:\n raise Exception(\"Target system has pending configuration, please sync beforehand.\")", "def sync(type, all):\n print(\"Syncing\")", "def get_current_mode(self):\n return self.read(0xa2)", "def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)", "def setSyncMode(self, IsPauseOn = True):\n self._IsPauseOn = IsPauseOn", "def patch_mode(self):\n return self._meta['patch_mode']", "def currentMode(self):\n logger.debug(\"Func: currentMode/getter\")\n\n return self._currentsDict[\"currentMode\"]", "def tflite_mode(self):\n return getattr(self, \"_tflite_mode\", False)", "def mode(self) -> Mode:\n ...", "def do_sync(self):\n raise NotImplementedError() # pragma: no cover", "def _get_mode(self):\n raise NotImplementedError", "def frame_transfer_mode(self):\n return self.frame_transfer_mode_state", "def mode(self):\n return self._data.get('mode', None)", "def get_mode(self, ):\n return self.get_parameter('mode')", "def lookup_sync(self, flag=0):\n if flag == 1 or self.ser.read() == self.sync[3]:\n if self.ser.read() == self.sync[2]:\n if self.ser.read() == self.sync[1]:\n if self.ser.read() == self.sync[0]:\n return True\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n else:\n return False", "def light_sync(self):", "def mode(self):\r\n pass", "def mode(self):\n return self._lift(\"mode\")", "def sync(self):\n return", "def _pre_sync(self):", "def find_sync_loop(self):\n self._find_xbb_infos()\n self._find_exit_only_bbs()\n if not self._build_rd_wrt_list():\n return False, None\n self._find_sync_loop_vars()\n return self.syncinfo.is_sync_loop, self.syncinfo", "def dev_mode(self):\r\n return self._dev_mode", "def FilterMode(self):\n if self.force_auto_sync:\n self.get('FilterMode')\n return self._FilterMode", "def get_fan_mode(self):\n return self.parent._fan_auto_mode", "def getMode(self):\r\n # ViStatus status = AcqrsD1_getMode(ViSession instrumentID,\r\n # ViInt32* mode, ViInt32* modifier, ViInt32* flags)\r\n mode = ViInt32()\r\n modifier = ViInt32()\r\n flags = ViInt32()\r\n self.callFunc('AcqrsD1_getMode', self.session,\r\n byref(mode), byref(modifier), byref(flags))\r\n return (mode.value, modifier.value, flags.value)", "def getSyncState(self, authenticationToken):\r\n pass", "def config_sync(self) -> Optional['outputs.FeatureMembershipConfigmanagementConfigSync']:\n return pulumi.get(self, \"config_sync\")", "def mode(self) -> str:\r\n return self._mode", "def mode(self) -> int:\n return self._mode", "def sync_word(self) -> bytearray:\n # 
Handle when sync word is disabled..\n if not self.sync_on:\n return None\n # Sync word is not disabled so read the current value.\n sync_word_length = self.sync_size + 1 # Sync word size is offset by 1\n # according to datasheet.\n sync_word = bytearray(sync_word_length)\n self._read_into(_REG_SYNC_VALUE1, sync_word)\n return sync_word", "def SyncClockRef(self):\n if self.force_auto_sync:\n self.get('SyncClockRef')\n return self._SyncClockRef", "def __check_mode_change(self):\n if self.mode[\"auto_mode\"] != self.mode[\"last_mode\"]:\n self.mode[\"last_mode\"] = self.mode[\"auto_mode\"]\n return True\n return False", "def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))", "def is_first_synced(self):\n return True", "def synchronous_replication(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"synchronous_replication\")", "def getSyncState(self, authenticationToken):\r\n self.send_getSyncState(authenticationToken)\r\n return self.recv_getSyncState()", "def _get_vc_mode(self):\n return self.__vc_mode", "def get_multiplex_mode(self, c):\n multiplexed = (self.binding.get_switcher_mode() == 1)\n return multiplexed", "def get_pump_mode(self):\n return self.__pump_mode", "def patch_mode(self) -> str:\n return pulumi.get(self, \"patch_mode\")", "def patch_mode(self) -> str:\n return pulumi.get(self, \"patch_mode\")", "def current_fan_mode(self):\n return self._current_fan_mode", "def get_stat(self):\n return os.stat(self.sync_path)", "def get_sync_attrs(self):\n return self._sync_attrs", "def get_socket_mode(self):\n\t\treturn call_sdk_function('PrlVmDevSerial_GetSocketMode', self.handle)", "def _get_modes(self):\n return self.__modes", "def get_preferred_mode(self):\n ret = self._transfer(TVGetModes())\n return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None", "def change_mode(self):\n return (self.mode + 1) % 2", "def get_cache_mode(self) -> CacheModeStr:\n return CACHE_MODES.inverse[self.cacheMode()]", "def sync_start(self):" ]
[ "0.72940135", "0.7088847", "0.7082289", "0.7017716", "0.6868106", "0.68618685", "0.68516916", "0.6798914", "0.6798914", "0.6657855", "0.64055157", "0.6349925", "0.63405436", "0.6327484", "0.63198066", "0.63171744", "0.631424", "0.63071305", "0.63044363", "0.62617934", "0.6251813", "0.6222751", "0.619303", "0.619303", "0.619303", "0.6186808", "0.6173106", "0.61379534", "0.61368483", "0.6127583", "0.6127583", "0.6127583", "0.6125888", "0.6124105", "0.6115935", "0.60980964", "0.60827106", "0.60760176", "0.6067037", "0.6065933", "0.6038983", "0.603593", "0.60028195", "0.5994737", "0.5993525", "0.59935147", "0.59785855", "0.59785855", "0.5975027", "0.59712005", "0.59530485", "0.5952344", "0.5949194", "0.5915831", "0.59073913", "0.59045583", "0.5901526", "0.5892203", "0.58886904", "0.5887836", "0.5872273", "0.58637345", "0.5843705", "0.5835319", "0.5826509", "0.5819516", "0.5812832", "0.581084", "0.5766231", "0.57625264", "0.57618654", "0.5746903", "0.5742832", "0.5737512", "0.57308525", "0.572193", "0.5713872", "0.5692287", "0.5671296", "0.5644242", "0.56359047", "0.5625339", "0.5614216", "0.56020105", "0.5600684", "0.5595832", "0.55943507", "0.55695015", "0.55657417", "0.55640423", "0.55640423", "0.5553281", "0.5550448", "0.5548996", "0.55477566", "0.5539976", "0.5528216", "0.55247486", "0.5521722", "0.55214137" ]
0.91350996
0
Checking the sync_mode based on the given configuration
def check_sync_mode(): global sync_mode _description = '' _modes = { SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)', SyncMode.SENDER: '(LOCAL ➔ REMOTE)', SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)', SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)', SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)', SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)', SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)', SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)', SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)' } for _mode, _desc in _modes.items(): if getattr(SyncMode, 'is_' + _mode.lower())(): sync_mode = _mode _description = _desc if is_import(): output.message( output.Subject.INFO, f'Import file {output.CliFormat.BLACK}{system.config["import"]}{output.CliFormat.ENDC}', True ) system.config['is_same_client'] = SyncMode.is_same_host() output.message( output.Subject.INFO, f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}', True )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sync_mode():\n return sync_mode", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def check_config_mode(self):\n return False", "def __check_mode_change(self):\n if self.mode[\"auto_mode\"] != self.mode[\"last_mode\"]:\n self.mode[\"last_mode\"] = self.mode[\"auto_mode\"]\n return True\n return False", "def get_config_sync_status(self):\n \n try:\n device_group = self.connection.Management.DeviceGroup.get_list()\n print self.connection.Management.DeviceGroup.get_sync_status([device_group])\n \n except:\n raise Exception(\"Target system has pending configuration, please sync beforehand.\")", "def is_config_mode(self):\n\n return self._connection.get_prompt().strip().startswith('(')", "def _change_conf_check(mds_config):\n loop = asyncio.get_event_loop()\n crt = model.async_set_application_config('ceph-fs', mds_config)\n loop.run_until_complete(crt)\n results = _get_conf()\n self.assertEquals(\n results['mds_cache_memory_limit'],\n mds_config['mds-cache-memory-limit'])\n self.assertAlmostEqual(\n float(results['mds_cache_reservation']),\n float(mds_config['mds-cache-reservation']))\n self.assertAlmostEqual(\n float(results['mds_health_cache_threshold']),\n float(mds_config['mds-health-cache-threshold']))", "def read_configuration_mode(self):\n configuration_mode = self.scpi_comm('CONFIG?').strip()\n mode = 'Unknown'\n if configuration_mode == '0':\n mode = 'Voltage tracking'\n if configuration_mode == '2':\n mode = 'Dual output'\n if configuration_mode in ('3', '4'):\n mode = 'Track Voltage and Current'\n return mode", "def DualMode(self) -> bool:", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))", "def isSync(self):\n return False", "def check_config_mode(self, check_string=\">config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)", "def check_config_mode(self, check_string=\"(config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)", "def check_config_mode(self, check_string=\")#\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string)", "def synchronize_system_mode(self):\n\n return self._synchronize_system_mode", "def getSyncInfo (self, connection) :\r\n \r\n if self.state != 'valid' :\r\n return False\r\n \r\n if self.sync_target :\r\n return False\r\n \r\n self.state = 'recv_sync'\r\n self.sync_target = connection\r\n self.do_sync_get()\r\n \r\n return True", "def is_delta_sync_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"delta_sync_enabled\"]\n except KeyError:\n return False", "def supported_modes(self):\n return [OFF, SYNC, CHARGE]", "def check_config_mode(\n self, check_string: str = \")#\", pattern: str = \"\", force_regex: bool = False\n ) -> bool:\n return super().check_config_mode(check_string=check_string, pattern=pattern)", "def config_sync(self) -> Optional['outputs.FeatureMembershipConfigmanagementConfigSync']:\n return pulumi.get(self, \"config_sync\")", "def __getMode( self ):\n\n res = self.rssConfig.getConfigState()\n\n if res == 'Active':\n\n if self.rssClient is None:\n self.rssClient = ResourceStatusClient()\n return True\n\n self.rssClient = None\n return False", "def check_enable_mode(self, *args, **kwargs):\n pass", "def check_manual_mode_change(self, event):\n if 
self.vehicle.get_manual_mode_change(reset=True):\n data = lambda: None\n data.mode_to_set = \"Inactive\"\n self.set_companion_mode(data)", "def _check_for_sync(self, fl_name):\n fl_sync = True\n # Get the list of flavors names to sync.\n fl_wlist = self.get_flavors_white_list()\n fl_blist = self.get_flavors_black_list()\n\n if (len(fl_wlist) != 0):\n fl_sync = self._regex_comp(fl_name, fl_wlist)\n if (fl_sync and (len(fl_blist) != 0)):\n fl_sync = not(self._regex_comp(fl_name, fl_blist))\n return fl_sync", "def lookup_sync(self, flag=0):\n if flag == 1 or self.ser.read() == self.sync[3]:\n if self.ser.read() == self.sync[2]:\n if self.ser.read() == self.sync[1]:\n if self.ser.read() == self.sync[0]:\n return True\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n else:\n return False", "def __get_verify_mode(self):\n ...", "def check_enable_mode(self, check_string='#'):\n return True", "def is_dump():\n return sync_mode in (SyncMode.DUMP_LOCAL, SyncMode.DUMP_REMOTE)", "async def _check_multiple_mode(self):\n logger.info(\"Host {}:Checking multiple mode\".format(self._host))\n out = await self.send_command('show mode')\n if 'multiple' in out:\n self._multiple_mode = True\n\n logger.debug(\"Host {}: Multiple mode: {}\".format(self._host, self._multiple_mode))", "def mode_remote(self):\n self.send(\"!MR\")\n # time.sleep(2.0)\n # No feedback, so query to verify set\n got = self.get_mode()\n assert got == \"R\", got", "def __determine_config_type():", "def lock_mode(self) -> str:\n return pulumi.get(self, \"lock_mode\")", "def lock_mode(self) -> str:\n return pulumi.get(self, \"lock_mode\")", "def tob_connection_synced():\n global app_config\n\n return (\"TOB_CONNECTION\" in app_config) and (app_config[\"TOB_CONNECTION\"] in synced) and (synced[app_config[\"TOB_CONNECTION\"]])", "def _check_config(self):", "def find_sync_loop(self):\n self._find_xbb_infos()\n self._find_exit_only_bbs()\n if not self._build_rd_wrt_list():\n return False, None\n self._find_sync_loop_vars()\n return self.syncinfo.is_sync_loop, self.syncinfo", "def getSyncFor (self, conn) :\r\n for pw, _conn in self.clients :\r\n if _conn and _conn.getSyncInfo(conn) :\r\n self.ongoing_sync_count += 1\r\n return True\r\n \r\n return False", "def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode", "def system(self, mode=None):\n if mode == System.AUTO:\n self.change_request[\"SystemSwitch\"] = System.AUTO\n elif mode == System.COOL:\n self.change_request[\"SystemSwitch\"] = System.COOL\n elif mode == System.HEAT:\n self.change_request[\"SystemSwitch\"] = System.HEAT\n elif mode == System.OFF:\n self.change_request[\"SystemSwitch\"] = System.OFF\n else:\n return False\n return self.change_request[\"SystemSwitch\"]", "def configOperationMode(self):\n\n if self.ui.checkShowAlignStar.isChecked():\n self.ui.checkPolarAlignment.setEnabled(True)\n else:\n self.ui.checkPolarAlignment.setEnabled(False)\n if self.ui.checkPolarAlignment.isChecked():\n self.ui.checkEditNone.setChecked(True)", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return 
self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def check_config(cfg):", "def config_mode(self):\n return \"\"", "def _calc_relay_mode(\n momentary_mode_on_value,\n momentary_follow_sense_value,\n momentary_on_off_trigger_value,\n):\n if not momentary_mode_on_value:\n return RelayMode.LATCHING\n if momentary_follow_sense_value:\n return RelayMode.MOMENTARY_C\n if momentary_on_off_trigger_value:\n return RelayMode.MOMENTARY_B\n return RelayMode.MOMENTARY_A", "def get_multiplex_mode(self, c):\n multiplexed = (self.binding.get_switcher_mode() == 1)\n return multiplexed", "def check_gpsync_running(options):\n \n return gp.getSyncmasterPID('localhost', options.master_data_dir) > 0", "def test_enable_maintence_mode1(self):\n pass", "def _get_ldp_in_sync(self):\n return self.__ldp_in_sync", "def _exact_mode_match(current_mode, command_modes):\n if not type(command_modes) == list:\n command_modes = [command_modes]\n for mode in command_modes:\n if mode == current_mode:\n return True\n if mode.endswith('*') and mode[:-1] == current_mode:\n return True\n return False", "def is_valid_mode(mode: str) -> bool:\n return mode in (TEST, EASY, HARD)", "def test_mixedModes(self):\n self._sendModeChange(\"+osv\", \"a_user another_user\")\n self._checkModeChange([(True, \"osv\", (\"a_user\", None, \"another_user\"))])\n self._sendModeChange(\"+v-os\", \"a_user another_user\")\n self._checkModeChange(\n [(True, \"v\", (\"a_user\",)), (False, \"os\", (\"another_user\", None))]\n )", "def _checkModeChange(self, expected, target=None):\n result = self._parseModeChange(self.client.calls, target)\n self.assertEqual(result, expected)\n self.client.calls = []", "def _match_current_modes(command, current_mode, modes):\n if current_mode in modes:\n return True\n #\n # if the modes is enable, this works everywhere\n #\n if 'login' in modes:\n return True\n #\n # if the modes is login, and the mode is anything but login,\n # then this is true\n #\n if 'enable' in modes and current_mode != 'login':\n return True\n for mode in modes:\n if mode.endswith('*') and current_mode.startswith(mode[:-1]):\n return True\n if command.get('command-type') == 'config-submode':\n for mode in modes:\n if current_mode.startswith(mode):\n return True\n \n return False", "def is_on(self):\n if self._switch_type == \"record_motion\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_MOTION\n elif self._switch_type == \"record_always\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_ALWAYS\n elif self._switch_type == \"record_smart\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_SMARTDETECT\n elif self._switch_type == \"ir_mode\":\n return self._camera_data[\"ir_mode\"] == self._ir_on_cmd\n elif self._switch_type == \"hdr_mode\":\n return self._camera_data[\"hdr_mode\"] is True\n elif self._switch_type == \"high_fps\":\n return self._camera_data[\"video_mode\"] == TYPE_HIGH_FPS_ON\n else:\n return self._camera_data[\"status_light\"] == \"True\"", "def get_config_spec(cls):\n return False", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def test_hvac_settings_mode() -> None:\n response: models.KamereonVehicleDataResponse = fixtures.get_file_content_as_schema(\n f\"{fixtures.KAMEREON_FIXTURE_PATH}/vehicle_data/hvac-settings.json\",\n schemas.KamereonVehicleDataResponseSchema,\n )\n response.raise_for_error_code()\n\n vehicle_data = cast(\n 
models.KamereonVehicleHvacSettingsData,\n response.get_attributes(schemas.KamereonVehicleHvacSettingsDataSchema),\n )\n\n assert vehicle_data.mode == \"scheduled\"", "def check_sync(self):\r\n if not self.awaiting_sync:\r\n return True\r\n self.check_ack_queue()\r\n return not self.awaiting_sync", "def test_enable_maintence_mode(self):\n pass", "def syncheck(self) :\n\t\ttry :\n\t\t\treturn self._syncheck\n\t\texcept Exception as e:\n\t\t\traise e", "def check_config(config):\n pass", "def test_execute_clock_sync_command_mode(self):\n self.assert_initialize_driver()\n\n # command the instrument to sync clock.\n self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.CLOCK_SYNC)\n\n reply = self.driver_client.cmd_dvr('get_resource', Parameter.CLOCK)\n \n # convert driver's time from formatted date/time string to seconds integer\n instrument_time = time.mktime(time.strptime(reply.get(Parameter.CLOCK).lower(), \"%Y/%m/%d %H:%M:%S\"))\n\n # need to convert local machine's time to date/time string and back to seconds to 'drop' the DST attribute so test passes\n # get time from local machine\n lt = time.strftime(\"%d %b %Y %H:%M:%S\", time.gmtime(time.mktime(time.localtime())))\n # convert local time from formatted date/time string to seconds integer to drop DST\n local_time = time.mktime(time.strptime(lt, \"%d %b %Y %H:%M:%S\"))\n\n # Now verify that the time matches to within 5 seconds\n self.assertLessEqual(abs(instrument_time - local_time), 5)", "def trigger_mode_available(self, modestr):\n index = self.triggers[modestr]\n ans = self.lib.IsTriggerModeAvailable(ct.c_int(index))\n if ans == 20002:\n return True\n else:\n return False", "def test_using_mirror_output_type():\n\n def check_correct_type(index):\n # Force a race condition\n if index == 0:\n sleep(0.1)\n if index % 2 == 0:\n with _using_mirror_output_type():\n sleep(0.5)\n return cuml.global_settings.output_type == \"mirror\"\n else:\n output_type = test_output_types_str[index]\n with using_output_type(output_type):\n sleep(0.5)\n return cuml.global_settings.output_type == output_type\n\n results = [\n delayed(check_correct_type)(index)\n for index in range(len(test_output_types_str))\n ]\n\n assert (delayed(all)(results)).compute()", "def in_test_mode(mode: str) -> bool:\n return mode == TEST", "def is_time_synchronization_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSynchronizationEnabled', self.handle))", "def getMode(self):\n with self.lock:\n mode = self.mode\n return mode", "def fsync(var, wrapper, message):\n sync_modes(var)", "def is_confort(self, now, mode_thermostat='global'):\n confort_global = self.select(). \\\n where(Horaires_confort.debut <= now). \\\n where(Horaires_confort.fin >= now). \\\n where(Horaires_confort.mode == mode_thermostat). 
\\\n count()\n if mode_thermostat == 'global':\n return confort_global\n else:\n confort = False\n return confort", "def setSyncMode(self, IsPauseOn = True):\n self._IsPauseOn = IsPauseOn", "def hvac_mode(self):\n if self.ac.status is None:\n _LOGGER.debug(f\"hvac_mode: status is None, returning None\")\n return None\n if self.ac.status.is_on:\n ac_mode = self.ac.status.ac_mode\n value = self.HVAC_MODE_MAPPING[ac_mode]\n _LOGGER.debug(f\"hvac_mode: returning {value} (derived from {ac_mode})\")\n return value\n else:\n _LOGGER.debug(f\"hvac_mode: returning HVAC_MODE_OFF - device is off\")\n return HVAC_MODE_OFF", "def mode(self) -> Optional[pulumi.Input['WorkloadMetadataConfigMode']]:\n return pulumi.get(self, \"mode\")", "def in_easy_mode(mode: str) -> bool:\n return mode == EASY", "def is_import():\n return sync_mode in (SyncMode.IMPORT_LOCAL, SyncMode.IMPORT_REMOTE)", "def IsSynchronized(self) -> bool:", "def isMode(mode, check):\n if mode==\"default\" or mode==\"all\":\n return True\n \n if mode.__contains__(check):\n return True\n\n if check.__contains__(\"_\"):\n check_modes = check.split(\"_\")\n for check_mode in check_modes:\n if not isMode(mode, check_mode):\n return False\n return True\n\n return False", "def semantics_changes(config):\n config_global_value = global_value(config, None)\n in_match_enabled = False\n if not config.permit_root_login:\n return True\n\n for opt in config.permit_root_login:\n if opt.value == \"yes\" and opt.in_match is not None and \\\n opt.in_match[0].lower() != 'all':\n in_match_enabled = True\n\n return config_global_value is None and not in_match_enabled", "def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)", "def get_sync_status(self, connector_id, previous_completed_at):\n # @todo Need logic here to tell if the sync is not running at all and not\n # likely to run in the near future.\n connector_details = self.get_connector(connector_id)\n succeeded_at = self._parse_timestamp(connector_details[\"succeeded_at\"])\n failed_at = self._parse_timestamp(connector_details[\"failed_at\"])\n current_completed_at = (\n succeeded_at if succeeded_at > failed_at else failed_at\n )\n\n # The only way to tell if a sync failed is to check if its latest\n # failed_at value is greater than then last known \"sync completed at\" value.\n if failed_at > previous_completed_at:\n service_name = connector_details[\"service\"]\n schema_name = connector_details[\"schema\"]\n raise AirflowException(\n f'Fivetran sync for connector \"{connector_id}\" failed; '\n f\"please see logs at \"\n f\"{self._connector_ui_url_logs(service_name, schema_name)}\"\n )\n\n sync_state = connector_details[\"status\"][\"sync_state\"]\n self.log.info(f'Connector \"{connector_id}\": sync_state = {sync_state}')\n\n # Check if sync started by FivetranOperator has finished\n # indicated by new 'succeeded_at' timestamp\n if current_completed_at > previous_completed_at:\n self.log.info('Connector \"{}\": succeeded_at: {}'.format(\n connector_id, succeeded_at.to_iso8601_string())\n )\n return True\n else:\n return False", "def getAddressInSync(self):\n return self._addrInSyncMode", "def is_config(command): \n if command.startswith('<') and command.endswith('>') and \\\n ('WRITE' not in command) and ('READ' not in command):\n return True\n else:\n return False\n # end if", "def tflite_mode(self):\n return getattr(self, \"_tflite_mode\", False)", "def common_mode(self):\n return self._common_mode", "def common_mode(self):\n return self._common_mode", "def _check_sql_mode(self, 
**kwargs):\n return []", "def notify_mode_change(self, mode):\n pass", "def set_time_sync_smart_mode_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetTimeSyncSmartModeEnabled', self.handle, bEnabled)", "def custom_assess_status_check(self):\n check_config_set = []\n if self.backup_target_type == \"nfs\":\n check_config_set = ['nfs-shares']\n elif self.backup_target_type == \"s3\":\n check_config_set = [\n \"tv-s3-secret-key\",\n \"tv-s3-access-key\",\n \"tv-s3-region-name\",\n \"tv-s3-bucket\",\n \"tv-s3-endpoint-url\"]\n unset_config = [c for c in check_config_set if not hookenv.config(c)]\n if unset_config:\n return \"blocked\", \"{} configuration not set\".format(\n ', '.join(unset_config))\n # For s3 support backup-target-type should be set to 'experimental-s3'\n # as s3 support is pre-production. The self.backup_target_type\n # property will do any transaltion needed.\n if self.backup_target_type not in [\"nfs\", \"s3\"]:\n return \"blocked\", \"Backup target type not supported\"\n return None, None", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def test_get_irc_mode(matrix):\n matrix.charm_config[\"enable-tls\"] = True\n assert matrix.get_irc_mode() == \"tcp+tls\"\n matrix.charm_config[\"enable-tls\"] = False\n assert matrix.get_irc_mode() == \"tcp\"", "def test_mode_from_knx(self):\n assert DPTHVACMode.from_knx((0x00,)) == HVACOperationMode.AUTO\n assert DPTHVACMode.from_knx((0x01,)) == HVACOperationMode.COMFORT\n assert DPTHVACMode.from_knx((0x02,)) == HVACOperationMode.STANDBY\n assert DPTHVACMode.from_knx((0x03,)) == HVACOperationMode.NIGHT\n assert DPTHVACMode.from_knx((0x04,)) == HVACOperationMode.FROST_PROTECTION", "def get_window_mode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetWindowMode', self.handle)", "def antenny_config_check(self):\n return self.antenny_config.check()", "def isDefaultMode():\n\treturn 0", "def testWrongMode(self):\n self.mgr.status = mavutil.mavlink.GOPRO_HEARTBEAT_STATUS_DISCONNECTED\n self.mgr.handleRecordCommand( CAPTURE_MODE_VIDEO, RECORD_COMMAND_TOGGLE )\n self.assertFalse(self.mgr.sendGoProCommand.called)", "def check_device_state(self):", "def mode(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.MODE, self._SW_VER)]" ]
[ "0.72925556", "0.692381", "0.6704957", "0.63555396", "0.63372236", "0.61973035", "0.61619157", "0.6108568", "0.6087788", "0.6085571", "0.6059644", "0.60411906", "0.60293037", "0.60175145", "0.594344", "0.59315693", "0.588756", "0.58717567", "0.58026725", "0.580076", "0.5800057", "0.579664", "0.5757282", "0.5745641", "0.5716965", "0.57119477", "0.5677909", "0.5669484", "0.56475365", "0.5643412", "0.56171644", "0.5615934", "0.55555683", "0.55555683", "0.5544223", "0.55349296", "0.5533601", "0.5494097", "0.5473345", "0.5461596", "0.5456243", "0.5450835", "0.5450835", "0.5403181", "0.539877", "0.5392249", "0.53834045", "0.5381016", "0.53691816", "0.5363916", "0.53636235", "0.53630406", "0.5359551", "0.53398603", "0.53214467", "0.527817", "0.5276147", "0.52666044", "0.52609533", "0.5252898", "0.5243731", "0.52407044", "0.52324545", "0.52317303", "0.52310324", "0.5227998", "0.522648", "0.52240723", "0.5221076", "0.5199625", "0.5198923", "0.5197193", "0.51901007", "0.51703423", "0.51538414", "0.5147889", "0.5136712", "0.5132983", "0.51248085", "0.51220316", "0.5119724", "0.5110847", "0.5106215", "0.5101508", "0.5095426", "0.5095426", "0.50914776", "0.50911367", "0.50878805", "0.5086441", "0.5084523", "0.5084523", "0.5067566", "0.506275", "0.505931", "0.5055102", "0.5040421", "0.5039461", "0.5036281", "0.5034183" ]
0.78860736
0
Check if the given client is a remote client
def is_remote(client): if client == Client.ORIGIN: return is_origin_remote() elif client == Client.TARGET: return is_target_remote() elif client == Client.LOCAL: return False else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_remote(self): # -> Any | bool:\n ...", "def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True", "def is_local_client(self):\n return self.msg.is_local_client", "def is_remote(self):\n return False", "def is_remote(self):\n raise NotImplementedError()", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))", "def is_client(self):\n if not hasattr(self, '_is_client'):\n self._is_client = hasattr(self, 'client')\n return self._is_client", "def is_client_alive(self, client):\n client_conn = self.all_clients[client]\n try:\n\n ping_message = Message(\"server\", client, \"utility\", \"ping\")\n client_conn.send(str.encode(ping_message.pack_to_json_string()))\n\n except Exception as e:\n print(\"Client communication error \" + str(e))\n return False\n return True", "def test_connection(remote=False):\n import socket\n remote_server = 'www.google.com' if not remote else remote # TODO: maybe improve for China\n try:\n # does the host name resolve?\n host = socket.gethostbyname(remote_server)\n # can we establish a connection to the host name?\n con = socket.create_connection((host, 80), 2)\n return True\n except:\n print(\"Can't connect to a server...\")\n pass\n return False", "def has_client(self, ip):\n for cli in self.clients:\n if cli.ip == ip:\n return cli\n return None", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def client_exists(self, client=None):\n if type(client) is Client:\n return client.client_id in [c.client_id for c in self.client_list]\n else:\n return False", "def check(client: Client):\n pass", "def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)", "def has_client(self):\n \n return len(self._clients) > 0", "def has_client(self):\n \n return len(self._clients) > 0", "def remote(self):\r\n return self._url.scheme in ('http', 'https')", "def is_cups_server(rm):\n try:\n s = socket.socket()\n s.settimeout(0.3)\n s.connect((rm, 631))\n s.close()\n\n return True\n except (socket.error, socket.timeout):\n return False", "def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "async def check_client(self, client_id: Identity) -> AuthResult:\n raise NotImplementedError", "def is_remote(state: 'JobState') -> bool:\n return state in [\n JobState.WAITING, JobState.WAITING_CR, JobState.RUNNING,\n JobState.RUNNING_CR\n ]", "def user_has_perms_on_client(user, client):\n if client and client not in user.clients:\n return False\n\n return True", "def _client_allowed(self):\r\n client_ip = self._client_address[0]\r\n if not client_ip in self._settings.allowed_clients and \\\r\n not 'ALL' in self._settings.allowed_clients:\r\n self._send_content('Access from host %s forbidden.' 
% client_ip, 'text/html')\r\n return False\r\n return True", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def is_allowed_to_see_clients(session):\n val = session.get(\"allowed_to_see_clients\")\n # Check to see if their permissions are still valid\n if val and TimeUtils.get_local_timestamp() < val[1]:\n return val[0]\n\n user = None\n\n for server in settings.INSTALLED_GITSERVERS:\n gitserver = models.GitServer.objects.get(host_type=server[\"type\"], name=server[\"hostname\"])\n auth = gitserver.auth()\n user = auth.signed_in_user(gitserver, session)\n if not user:\n continue\n\n api = user.api()\n for authed_user in server.get(\"authorized_users\", []):\n if user.name == authed_user or is_team_member(session, api, authed_user, user):\n logger.info(\"'%s' is a member of '%s' and is allowed to see clients\" % (user, authed_user))\n session[\"allowed_to_see_clients\"] = (True,\n TimeUtils.get_local_timestamp() + settings.PERMISSION_CACHE_TIMEOUT)\n return True\n logger.info(\"%s is NOT allowed to see clients on %s\" % (user, gitserver))\n session[\"allowed_to_see_clients\"] = (False, TimeUtils.get_local_timestamp() + settings.PERMISSION_CACHE_TIMEOUT)\n return False", "def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def check_credentials(client):\n pid, uid, gid = get_peercred(client)\n\n euid = os.geteuid()\n client_name = \"PID:%s UID:%s GID:%s\" % (pid, uid, gid)\n if uid not in (0, euid):\n raise SuspiciousClient(\"Can't accept client with %s. It doesn't match the current EUID:%s or ROOT.\" % (\n client_name, euid\n ))\n\n _LOG(\"Accepted connection on fd:%s from %s\" % (client.fileno(), client_name))\n return pid, uid, gid", "def _is_self(self, ip, port):\n import socket as sk\n self_ip = sk.gethostbyname(sk.gethostname())\n self_port = self.config['API_PORT']\n return str(self_ip) == ip and self_port == port", "def _check_connection() -> bool:\n return bool(subprocess.check_output([\"hostname\", \"-I\"]))", "def check_client(self):\n self.current_state = \"No Network\"\n if self.esp_mgr.ap:\n if self.client_socket:\n # client exists\n self.current_state = \"Connected\"\n if self.client_socket.connected():\n if self.client_socket.available():\n data = self.client_socket.recv()\n if data:\n self._add_to_buffer(data)\n else:\n self._close_client()\n if time.monotonic() > self.test_connection:\n data = bytes([0])\n self.send_to_client(data)\n else:\n self._close_client()\n \n else:\n # check for new client\n self.current_state = \"Listening port 23\"\n # reset termious hack\n self.termious = None\n client_sock_num = self.esp_mgr.esp.socket_available(self.server_socket.socknum)\n if client_sock_num != adafruit_esp32spi_socket.NO_SOCKET_AVAIL:\n # new connection\n self.current_state = \"Connected\"\n self.test_connection = time.monotonic() + 5\n self.client_socket = adafruit_esp32spi_socket.socket(socknum=client_sock_num)\n \n self.send_telnet_command([telnet_IAC, telnet_cmd_codes['WONT'], telnet_opt_codes['Echo']])\n self.send_telnet_command([telnet_IAC, telnet_cmd_codes['WONT'], telnet_opt_codes['Suppress GA']])\n return self.current_state", "def in_host():\n return not in_docker()", "def is_client(self) -> bool:\n return self.zone.SharedRoomID and not self.zone.MasterMode", "def 
test_is_remote_source(self):\n self.assertEqual(self.project.is_remote(), False)", "def check_client(self, client_pid):\n if (client_pid in self._active_sessions.get_clients() and\n not psutil.pid_exists(client_pid)):\n self._close_session_completely(client_pid)\n return False\n return True", "def _is_remote_branch(self, branch_reference):\n return branch_reference.startswith(\"refs/remotes/\")", "def IsRemoteRerun(self):\n return self.IsRerun() and not self.IsLocalRerun()", "def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False", "def is_remote_session():\n return os.environ.get('SSH_TTY', os.environ.get('SSH_CONNECTION'))", "def is_host(self):\n return self.host", "def is_virtual_network_host():\n return False", "def isConnected():", "def _hostOK(self, host):\n if os.system(\"ping -c 1 $node &> /dev/null\"):\n # No access to host\n return False\n elif os.system(\"ssh -n -a -x $node 'ls' &> /dev/null\"):\n # No route to host\n return False\n else:\n return True", "def register_client(self, client):\r\n if isinstance(client, IridiumTransportClient):\r\n if self.__digi_iridium_supported == False:\r\n iridium_manager_tracer.error(\"Settings: \" \\\r\n \"Device does not support Iridium! \" \\\r\n \"Ensure you have a product that supports Iridium \" \\\r\n \"and has up to date firmware!\")\r\n return False\r\n elif isinstance(client, iDigiIridiumTransportClient):\r\n if self.__idigi_iridium_supported == False:\r\n iridium_manager_tracer.error(\"Settings: \" \\\r\n \"Device does not support iDigi Iridium!\" \\\r\n \"Ensure you have a product that supports iDigi Iridium \" \\\r\n \"and has up to date firmware!\")\r\n return False\r\n else:\r\n iridium_manager_tracer.warning(\"Client not of valid type.\")\r\n return False\r\n\r\n self.__client_list.append(client)\r\n return True", "def verify_user_existance(self, user):\n for client in self.clients:\n if user == client.get_name():\n return True\n return False", "def is_connected(self):\n if self.server: return True\n return False", "def istype(client, service_name: str):\n\n if is_client(client):\n return (\n client.meta.service_model.service_name.lower()\n == service_name.strip().lower()\n )\n return False", "def exists_remote(host, path):\n command = \"test -e \" + pipes.quote(path) + \" && echo 0 || echo 1\"\n (stdoutstring, stderrstring) = execute_ssh_command(host, port, USER, PASSWORD, None, None, command)\n\n for status in stdoutstring:\n if re.search('0', status):\n return True\n if re.search('1', status):\n return False", "def _is_sshd_server_running(self, timeout=1):\n try:\n self.ssh_client.connect(timeout=timeout)\n self.ssh_client.close()\n return True\n except Exception:\n return False", "def test_z_remote_command(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport subprocess\n\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"check_nrpe\"])\n\t\t\tif (str(\"/check_nrpe\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\t\ttry:\n\t\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"ssh\"])\n\t\t\t\tif (str(\"/ssh\") in str(theOutputtext)):\n\t\t\t\t\ttheResult = True\n\t\t\texcept Exception:\n\t\t\t\ttheResult = False\n\t\tassert theResult", "def remote(self):\n return 
self.client_address", "def is_connected():\r\n ipconfig_output = terminal('ipconfig | findstr /i gateway')\r\n if ipconfig_output != None:\r\n return any(i for i in ipconfig_output if i.isdigit())\r\n \r\n # Alternative way if ipconfig has error in some systems\r\n ## Slower than ipconfig workaround\r\n try:\r\n socket().connect(('8.8.8.8', 53))\r\n return True\r\n except:\r\n return False", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def handle_client(client): # Takes client socket as argument.\n\tr_packet = client.recv(BUFSIZ).decode(\"utf8\")\n\tar_packet = r_packet\n\tr_packet = r_packet.split(\"~\")\n\n\tfor sock in clients:\n\t\tif(clients[sock] == r_packet[0]):\n\t\t\tsock.send(bytes(ar_packet,\"utf8\"))", "def _connect(self, client):\n if client.transport.isOpen():\n return True\n\n try:\n client.transport.open()\n return True\n except Thrift.ErrorT, tx:\n if tx.message:\n message = tx.message\n else:\n message = \"Transport error, reconnect\"\n client.transport.close()\n raise ErrorThriftMessage(message)\n except Exception, e:\n client.transport.close()\n\n return False", "def remote(self):\n return self.getItunesAttribute('Track Type') == 'Remote'", "def isInternal(self):\n\n\t\t# TODO optimization do we really need to look at the host attributes?\n\t\t# maybe we can just use the global attribute (faster)\n\t\tfe = self.newdb.getFrontendName()\n\t\tnetwork = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetwork')\n\t\tnetmask = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetmask')\n\n\t\t# Test based on our client's IP address.\n\t\twork = string.split(network, '.')\n\t\tmask = string.split(netmask, '.')\n\t\tip = string.split(self.clientList[-1], '.')\n\n\t\tfor i in range(0, len(ip)):\n\t\t\ta = int(ip[i]) & int(mask[i])\n\t\t\tb = int(work[i]) & int(mask[i])\n\n\t\t\tif a != b:\n\t\t\t\treturn 0\n\n\t\treturn 1", "def identify_client(self,protocol):\n if protocol.resident:\n return protocol.peer\n #pdb.set_trace()", "def can_proxy_restclient(request, service, url):\n if not hasattr(request, \"can_proxy_restclient\"):\n request.can_proxy_restclient = is_admin()\n return request.can_proxy_restclient", "def server_exists(client, server_url):\n data = {\"server_url\": server_url}\n return client._creoson_post(\"windchill\", \"server_exists\", data, \"exists\")", "def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False", "def _verify_http_connection(self, ssh_client, ssh_server,\n test_ip, test_port, servers, should_pass=True):\n utils.kill_nc_process(ssh_server)\n url = 'http://%s:%d' % (test_ip, test_port)\n utils.spawn_http_server(ssh_server, port=test_port, message='foo_ok')\n utils.process_is_running(ssh_server, 'nc')\n try:\n ret = utils.call_url_remote(ssh_client, url)\n if should_pass:\n self.assertIn('foo_ok', ret)\n return\n self.assertNotIn('foo_ok', ret)\n except Exception as e:\n if not should_pass:\n return\n self._log_console_output(servers)\n self._log_local_network_status()\n raise e", "def ping(client: MobileClient) -> bool:\n if client.client_type == ClientType.WEBHOOK:\n return TBANSHelper._ping_webhook(client)\n else:\n return TBANSHelper._ping_client(client)", "def isconnected(self) -> bool:", "def test_connectivity(verbose=False):\n\n connectable = None\n time_out = 1 # Number of seconds for timeout\n ssh = paramiko.SSHClient()\n 
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n try:\n ssh.connect(env.host, timeout=time_out)\n connectable = True\n except paramiko.AuthenticationException:\n print(\"Error: On host = {Host}, Authentication problem during connectivity test\"\n .format(Host = env.host))\n connectable = False\n except socket.error, e:\n print(\"Error: On host = {Host}, Communication problem during connectivity test\"\n .format(Host = env.host))\n connectable = False\n ssh.close()\n\n if verbose:\n print(\"{Host:4} | connectable?: {Connectable}\".format(Host=env.host, Connectable=connectable))\n\n return connectable", "def exists_remote(host, path):\n status = subprocess.call(\n ['ssh', host, 'test -f {}'.format(pipes.quote(path))])\n if status == 0:\n return True\n if status == 1:\n return False\n raise Exception('SSH failed')", "def is_alive(addr, user):\n return _ssh_master_cmd(addr, user, 'check') == 0", "def remote_publishing_master():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'MASTER'", "async def _test_url(self, client, url):\n\n with async_timeout.timeout(10):\n websession = async_get_clientsession(self.hass)\n client = Client(websession, base_url=url)\n try:\n await client.exists()\n except NamfError:\n return False\n return True", "def check_master(client, master_only=False):\n if master_only and not is_master_node(client):\n logger.info('Master-only flag detected. Connected to non-master node. Aborting.')\n sys.exit(9)", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def _check_is_client_duped(client, client_id):\n try:\n other_client = CLIENTS[client_id]\n except KeyError:\n return\n \n if other_client is not client:\n raise RuntimeError(\n f'Creating the same client multiple times is not allowed; {client!r} already exists:, {other_client!r}.'\n )", "def remote_publishing_slave():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'SLAVE'", "def __check_ssh(self):\n sfcs = self.sshTunnelDict[\"target_ip\"]\n\n cmd = \"ps aux | grep ssh | awk '{print $20}'\"\n result = subprocess.Popen(cmd,\n shell= True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = result.communicate()\n if sfcs not in stdout.decode():\n return False\n else: return True", "def is_central_server() -> bool:\n return hasattr(Config().algorithm,\n 'cross_silo') and Config().args.port is None", "def getRemoteHost():", "def check_id(server_id, user_id):\n\n # The user_id parameters here is the same as nym_id in other api calls\n\n # The method is described as a \"ping\" in the API documentation, which should\n # be called after wallet initialized. 
However a remote account on the server\n # is required.\n\n if hasattr(opentxs, 'OTAPI_Wrap_pingNotary'): # new api name\n retval = opentxs.OTAPI_Wrap_pingNotary(server_id, user_id)\n else: # todo: old api name, remove in due time\n retval = opentxs.OTAPI_Wrap_checkServerID(server_id, user_id)\n\n print(\"(debug) check_server_id retval=\", retval)\n\n # The return value `1` for success is defined by\n # case (OTClient::checkServerId)\n # in OTClient::ProcessUserCommand()\n\n return retval == 1", "def validate_client_id(self, client_id, request, *args, **kwargs):\n log.debug('Validate client %r', client_id)\n client = request.client or self._clientgetter(client_id)\n if client:\n # attach client to request object\n request.client = client\n return True\n return False", "def is_connected(cls,socket):\n pass", "async def is_server_live(self, headers: dict[str, t.Any] = ...) -> bool:", "def prepareRemote(self, client, host):\n if not os.path.exists( client.location ) or not os.path.isdir( client.location ):\n raise Exception( \"The sources of client {0} should be found in local directory '{1}', but that either doesn't exist or is not a directory.\".format( client.name, client.location ) )\n if not source.prepareRemote(self, client, host):\n return False\n if self.isInCleanup():\n return\n host.sendFiles( client.location, self.remoteLocation(client, host) )\n return True", "def _try_connect(_host, _port, _client_id):\n try:\n conn = OpenRGBClient(_host, _port, name=_client_id)\n conn.comms.stop_connection()\n except OSError as exc:\n raise CannotConnect from exc\n\n return True", "def is_connected(self) -> bool:", "def on_connect_remote(remote_client, userdata, flags, rc):\n if rc == 0:\n remote_client.connected_flag = True\n print(\"Connected remote OK returned code = \", rc)\n else:\n print(\"Bad connection remote Returned code = \", rc)", "def executed_on_which_server(self, client, fn, *args, **kwargs):\n client.has_read_from.clear()\n fn(*args, **kwargs)\n self.assertEqual(1, len(client.has_read_from))\n return one(client.has_read_from)", "def record_client_address(self) -> bool:\n return pulumi.get(self, \"record_client_address\")", "def isconnected(self) -> bool:\n ...", "def authenticated(self):\n client_token = self.get_cookie(\"PA-client-token\")\n if not client_token:\n print(\"no cookie\")\n return False\n\n headers = cherrypy.request.headers\n if \"Remote-Addr\" not in headers:\n print(\"no IP\")\n return False\n\n to_hash = \"Python-Aboard \" + headers.get(\"Remote-Addr\", \"none\")\n to_hash += \" \" + headers.get(\"User-Agent\", \"unknown\")\n to_hash = to_hash.encode()\n token = hashlib.sha256(to_hash).digest()\n return client == client_token", "def remote():\n pass", "def is_dialing(self) -> bool:", "def rpc_test_connection(client, rpc_server, rpc_user=BTC_RPC_USER, rpc_password=BTC_RPC_PASSWD, rpc_port=BTC_RPC_PORT):\n try:\n get_info = rpc_getinfo(client, rpc_server, rpc_user=rpc_user, rpc_password=rpc_password, rpc_port=rpc_port)\n print(get_info)\n return True\n except JSONRPCException as err:\n return False", "def is_remote_access_allowed(self, path: str):\n return self.public_path_marker.test(path) or self.is_public(path) and not self.is_private(path)", "def is_connected(self) -> bool:\n return False if self._snitun is None else self._snitun.is_connected", "def authenticate_client(self, request, *args, **kwargs):\n auth = request.headers.get('Authorization', None)\n log.debug('Authenticate client %r', auth)\n if auth:\n try:\n _, s = auth.split(' ')\n client_id, 
client_secret = decode_base64(s).split(':')\n client_id = to_unicode(client_id, 'utf-8')\n client_secret = to_unicode(client_secret, 'utf-8')\n except Exception as e:\n log.debug('Authenticate client failed with exception: %r', e)\n return False\n else:\n client_id = request.client_id\n client_secret = request.client_secret\n\n client = self._clientgetter(client_id)\n if not client:\n log.debug('Authenticate client failed, client not found.')\n return False\n\n request.client = client\n\n if client.client_secret and client.client_secret != client_secret:\n log.debug('Authenticate client failed, secret not match.')\n return False\n\n log.debug('Authenticate client success.')\n return True", "def is_controlled(self):\n return False if self._remote_controller == \"\" else True", "def _check_tunnel(self, _srv):\n if self.skip_tunnel_checkup:\n self.tunnel_is_up[_srv.local_address] = True\n return\n self.logger.info('Checking tunnel to: {0}'.format(_srv.remote_address))\n if isinstance(_srv.local_address, string_types): # UNIX stream\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(TUNNEL_TIMEOUT)\n try:\n # Windows raises WinError 10049 if trying to connect to 0.0.0.0\n connect_to = ('127.0.0.1', _srv.local_port) \\\n if _srv.local_host == '0.0.0.0' else _srv.local_address\n s.connect(connect_to)\n self.tunnel_is_up[_srv.local_address] = _srv.tunnel_ok.get(\n timeout=TUNNEL_TIMEOUT * 1.1\n )\n self.logger.debug(\n 'Tunnel to {0} is DOWN'.format(_srv.remote_address)\n )\n except socket.error:\n self.logger.debug(\n 'Tunnel to {0} is DOWN'.format(_srv.remote_address)\n )\n self.tunnel_is_up[_srv.local_address] = False\n\n except queue.Empty:\n self.logger.debug(\n 'Tunnel to {0} is UP'.format(_srv.remote_address)\n )\n self.tunnel_is_up[_srv.local_address] = True\n finally:\n s.close()", "def UnitTestClient(self):\n print('\\n--client')\n print('client pid is ' + str(os.getpid()))\n try:\n s = socket.socket()\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.connect((socket.gethostbyname('localhost'), 25692))\n self.ExchangeClient(s)\n print('Shared secret: ' + str(self.SharedSecret))\n msg = self.recvStr(s)\n if msg == 'An encrypted message':\n print('Server message received OK')\n else:\n raise Exception('Failed to decrypt server message!')\n \n self.sendStr(s, 'Thanks for sharing')\n s.close()\n \n except Exception as e:\n traceback.print_exc(file=sys.stdout)\n s.close()\n return False\n \n return True", "def check_parameter_server(self, controller):\n for name in get_rosparam_controller_names(\"/\"):\n if name == controller:\n return True\n return False", "def connect(self, host):\n return False", "def check_nick_in_use(self, nick):\n return self.get_client_by_nick(nick) is not None" ]
[ "0.7279024", "0.7175781", "0.70540047", "0.6942053", "0.6814598", "0.6683476", "0.66315174", "0.66083586", "0.6517704", "0.6482558", "0.6460855", "0.6398728", "0.63722503", "0.6251613", "0.62041306", "0.62041306", "0.61882174", "0.615462", "0.61380357", "0.6073914", "0.6026141", "0.60112673", "0.60034007", "0.59746414", "0.5943694", "0.59414196", "0.5917503", "0.5916974", "0.5906758", "0.58996147", "0.5867779", "0.58547497", "0.5849106", "0.5840907", "0.5828129", "0.5820942", "0.58174926", "0.5799657", "0.5788218", "0.5753198", "0.5749985", "0.5728437", "0.57179564", "0.56987983", "0.5698164", "0.5698083", "0.5697406", "0.5666022", "0.5658869", "0.565505", "0.5647618", "0.5617524", "0.5593666", "0.55649495", "0.55615366", "0.5556274", "0.55559886", "0.555304", "0.55401367", "0.55323005", "0.55315155", "0.5518082", "0.5506662", "0.55031496", "0.5476782", "0.5473323", "0.5469798", "0.54697096", "0.54688996", "0.5464378", "0.54516983", "0.54502165", "0.5447083", "0.54374593", "0.5430913", "0.542985", "0.54273236", "0.5393048", "0.53910047", "0.5389272", "0.5388621", "0.5386086", "0.53802866", "0.53778535", "0.5372433", "0.53684896", "0.53680915", "0.53661966", "0.5362908", "0.53514045", "0.5350655", "0.53404796", "0.53296417", "0.5327172", "0.5326952", "0.53212744", "0.53170127", "0.5308961", "0.5308281", "0.53013414" ]
0.83206755
0
Check if target is remote client
def is_target_remote(): return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE, SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False", "def is_remote(self): # -> Any | bool:\n ...", "def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True", "def is_remote(self):\n return False", "def is_remote(self):\n raise NotImplementedError()", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))", "def is_local_client(self):\n return self.msg.is_local_client", "def remote(self):\r\n return self._url.scheme in ('http', 'https')", "def test_is_remote_source(self):\n self.assertEqual(self.project.is_remote(), False)", "def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def test_connection(remote=False):\n import socket\n remote_server = 'www.google.com' if not remote else remote # TODO: maybe improve for China\n try:\n # does the host name resolve?\n host = socket.gethostbyname(remote_server)\n # can we establish a connection to the host name?\n con = socket.create_connection((host, 80), 2)\n return True\n except:\n print(\"Can't connect to a server...\")\n pass\n return False", "def is_host(self):\n return self.host", "def is_client(self):\n if not hasattr(self, '_is_client'):\n self._is_client = hasattr(self, 'client')\n return self._is_client", "def has_client(self, ip):\n for cli in self.clients:\n if cli.ip == ip:\n return cli\n return None", "def is_virtual_network_host():\n return False", "def check(self, target, port):\n pass", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def supported_target(self, target, message_handler):\n\n # iOS can never be a host.\n return False", "def in_host():\n return not in_docker()", "def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True", "def is_remote(state: 'JobState') -> bool:\n return state in [\n JobState.WAITING, JobState.WAITING_CR, JobState.RUNNING,\n JobState.RUNNING_CR\n ]", "def _is_remote_branch(self, branch_reference):\n return branch_reference.startswith(\"refs/remotes/\")", "def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)", "def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)", "def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) 
when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False", "def use_remote_gateways(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")", "def use_remote_gateways(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")", "def supported_target(self, target, message_handler):\n\n # Android can never be a host.\n return False", "def IsTarget(self, target_name):\n return target_name in self.GetTargets()", "def test_z_remote_command(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport subprocess\n\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"check_nrpe\"])\n\t\t\tif (str(\"/check_nrpe\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\t\ttry:\n\t\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"ssh\"])\n\t\t\t\tif (str(\"/ssh\") in str(theOutputtext)):\n\t\t\t\t\ttheResult = True\n\t\t\texcept Exception:\n\t\t\t\ttheResult = False\n\t\tassert theResult", "def remote(self):\n return self.getItunesAttribute('Track Type') == 'Remote'", "def getRemoteHost():", "def test_is_remote_target(self):\n self.site.mode = SITE_MODE_SOURCE\n self.site.save()\n self.assertEqual(self.project.is_remote(), True)", "def IsRemoteRerun(self):\n return self.IsRerun() and not self.IsLocalRerun()", "def is_remote_session():\n return os.environ.get('SSH_TTY', os.environ.get('SSH_CONNECTION'))", "def remote_publishing_slave():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'SLAVE'", "def is_controlled(self):\n return False if self._remote_controller == \"\" else True", "def is_cups_server(rm):\n try:\n s = socket.socket()\n s.settimeout(0.3)\n s.connect((rm, 631))\n s.close()\n\n return True\n except (socket.error, socket.timeout):\n return False", "def _hostOK(self, host):\n if os.system(\"ping -c 1 $node &> /dev/null\"):\n # No access to host\n return False\n elif os.system(\"ssh -n -a -x $node 'ls' &> /dev/null\"):\n # No route to host\n return False\n else:\n return True", "def remote_publishing_master():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'MASTER'", "def remote():\n pass", "def _check_connection() -> bool:\n return bool(subprocess.check_output([\"hostname\", \"-I\"]))", "def check(client: Client):\n pass", "def is_target(self):\n\t\treturn self.window and self.window.target is self", "def __check_ssh(self):\n sfcs = self.sshTunnelDict[\"target_ip\"]\n\n cmd = \"ps aux | grep ssh | awk '{print $20}'\"\n result = subprocess.Popen(cmd,\n shell= True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = result.communicate()\n if sfcs not in stdout.decode():\n return False\n else: return True", "def local_is_up(self, target):\n try:\n check_address(target)\n except ValueError:\n self.logger.warning('Target must be a tuple (IP, port), where IP '\n 'is a string (i.e. \"192.168.0.1\") and port is '\n 'an integer (i.e. 40000). 
Alternatively '\n 'target can be a valid UNIX domain socket.')\n return False\n\n self.check_tunnels()\n return self.tunnel_is_up.get(target, True)", "def use_remote_gateways(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")", "def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False", "def _client_allowed(self):\r\n client_ip = self._client_address[0]\r\n if not client_ip in self._settings.allowed_clients and \\\r\n not 'ALL' in self._settings.allowed_clients:\r\n self._send_content('Access from host %s forbidden.' % client_ip, 'text/html')\r\n return False\r\n return True", "def _is_self(self, ip, port):\n import socket as sk\n self_ip = sk.gethostbyname(sk.gethostname())\n self_port = self.config['API_PORT']\n return str(self_ip) == ip and self_port == port", "def remote(self):\n return self.client_address", "def has_client(self):\n \n return len(self._clients) > 0", "def has_client(self):\n \n return len(self._clients) > 0", "def fingertip_no_remote(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_remote\", False)", "def _is_remote_reusable(inputs, calculation):\n can_use_remote = False\n #If no charge density file is available to restart from the calculation will except\n #with a not nice error message. So we can only reuse the charge density if these files are available\n retrieved_filenames = calculation.base.links.get_outgoing().get_node_by_label('retrieved').list_object_names()\n if any(file in retrieved_filenames for file in (\n 'cdn_last.hdf',\n 'cdn1',\n )):\n can_use_remote = True\n\n if 'fleurinp' in inputs:\n modes = inputs.fleurinp.get_fleur_modes()\n if modes['force_theorem'] or modes['dos'] or modes['band']:\n # in modes listed above it makes no sense copying cdn.hdf\n can_use_remote = False\n # without fleurinp it is harder to extract modes in this case\n # - simply try to reuse cdn.hdf and hope it works\n\n return can_use_remote", "def has_target(self):\n return self.target is not None", "def nremote(self):", "def isConnected():", "def get_on_tunnel(self):\n return self._is_on_tunnel", "def _is_sshd_server_running(self, timeout=1):\n try:\n self.ssh_client.connect(timeout=timeout)\n self.ssh_client.close()\n return True\n except Exception:\n return False", "def verify_as_host(self, target, message_handler):\n\n # Check we can host the target.\n if not self.supported_target(target, message_handler):\n raise UserException(\n \"{0} is not a supported {1} development host\".format(\n self.name, target.name))", "def _exists_remote(self, host):\n # This file gets written after cloudinit is done\n # path = '/var/lib/cloud/instance/boot-finished'\n path = '/home/ubuntu/SETUP_COMPLETE'\n t = 0\n sleep_len = 10\n while True:\n status = subprocess.call(\n ['ssh', '-oStrictHostKeyChecking=no', '-i', '/home/ubuntu/.ssh/id_rsa', 'ubuntu@'+host, 'test -f {}'.format(pipes.quote(path))])\n if status == 0:\n return True\n else:\n return False", "def supported_target(self, target, message_handler):\n\n # This default implementation checks that the architectures are the\n # same.\n return target is self", "def is_central_server() -> bool:\n return hasattr(Config().algorithm,\n 'cross_silo') and Config().args.port is None", "def 
can_proxy_restclient(request, service, url):\n if not hasattr(request, \"can_proxy_restclient\"):\n request.can_proxy_restclient = is_admin()\n return request.can_proxy_restclient", "def is_target( self ):\n\n raise NotImplementedError(\"is_target\");", "def is_client_alive(self, client):\n client_conn = self.all_clients[client]\n try:\n\n ping_message = Message(\"server\", client, \"utility\", \"ping\")\n client_conn.send(str.encode(ping_message.pack_to_json_string()))\n\n except Exception as e:\n print(\"Client communication error \" + str(e))\n return False\n return True", "def target_connected(self):\n return self.connected() and bool(self._dll.JLINKARM_IsConnected())", "def is_dialing(self) -> bool:", "def __call__(self, target, creds):\n\n return creds['is_admin'] == self.expected", "def _check_tunnel(self, _srv):\n if self.skip_tunnel_checkup:\n self.tunnel_is_up[_srv.local_address] = True\n return\n self.logger.info('Checking tunnel to: {0}'.format(_srv.remote_address))\n if isinstance(_srv.local_address, string_types): # UNIX stream\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(TUNNEL_TIMEOUT)\n try:\n # Windows raises WinError 10049 if trying to connect to 0.0.0.0\n connect_to = ('127.0.0.1', _srv.local_port) \\\n if _srv.local_host == '0.0.0.0' else _srv.local_address\n s.connect(connect_to)\n self.tunnel_is_up[_srv.local_address] = _srv.tunnel_ok.get(\n timeout=TUNNEL_TIMEOUT * 1.1\n )\n self.logger.debug(\n 'Tunnel to {0} is DOWN'.format(_srv.remote_address)\n )\n except socket.error:\n self.logger.debug(\n 'Tunnel to {0} is DOWN'.format(_srv.remote_address)\n )\n self.tunnel_is_up[_srv.local_address] = False\n\n except queue.Empty:\n self.logger.debug(\n 'Tunnel to {0} is UP'.format(_srv.remote_address)\n )\n self.tunnel_is_up[_srv.local_address] = True\n finally:\n s.close()", "def has_target(self):\n return self._has_target", "def is_bot(self) -> bool:", "def prepareRemote(self, client, host):\n if not os.path.exists( client.location ) or not os.path.isdir( client.location ):\n raise Exception( \"The sources of client {0} should be found in local directory '{1}', but that either doesn't exist or is not a directory.\".format( client.name, client.location ) )\n if not source.prepareRemote(self, client, host):\n return False\n if self.isInCleanup():\n return\n host.sendFiles( client.location, self.remoteLocation(client, host) )\n return True", "def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False", "def is_client(self) -> bool:\n return self.zone.SharedRoomID and not self.zone.MasterMode", "def isconnected(self) -> bool:", "def check_remote_pairing(ignore_errors):\n try:\n DeviceApi().get()\n return True\n except HTTPError as e:\n if e.response.status_code == 401:\n return False\n error = e\n except Exception as e:\n error = e\n\n LOG.warning('Could not get device info: {}'.format(repr(error)))\n\n if ignore_errors:\n return False\n\n if isinstance(error, HTTPError):\n if connected():\n raise BackendDown from error\n else:\n raise InternetDown from error\n else:\n raise error", "def active_failover_detect_host_now_follower(self):\n self.check_that_instance_is_alive()\n lfs = self.get_log_file()\n if lfs.find(\"resilientsingle up and running as follower\") >= 0:\n self.is_master = False\n return True\n return False", "def negotiation_should_advance(self):\n # Generally, this separates a bare TCP connect() 
from a True\n # RFC-compliant telnet client with responding IAC interpreter.\n server_do = sum(enabled for _, enabled in self.writer.remote_option.items())\n client_will = sum(enabled for _, enabled in self.writer.local_option.items())\n return bool(server_do or client_will)", "def client_exists(self, client=None):\n if type(client) is Client:\n return client.client_id in [c.client_id for c in self.client_list]\n else:\n return False", "def do_remote(self, *args):\n return self.do_scpi(':communicate:remote 1')", "def exists_remote(host, path):\n command = \"test -e \" + pipes.quote(path) + \" && echo 0 || echo 1\"\n (stdoutstring, stderrstring) = execute_ssh_command(host, port, USER, PASSWORD, None, None, command)\n\n for status in stdoutstring:\n if re.search('0', status):\n return True\n if re.search('1', status):\n return False", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def proxy_check(self, proxy):", "async def is_server_live(self, headers: dict[str, t.Any] = ...) -> bool:", "def remoteDispatch(self):\n\n if self.ui.remoteDevice.currentText() == 'Built-In':\n self.app.remote.startRemote()\n self.app.message.emit('Remote enabled', 0)\n self.deviceStat['remote'] = True\n self.ui.remoteDevice.setStyleSheet(self.BACK_GREEN)\n else:\n self.app.remote.stopRemote()\n self.app.message.emit('Remote disabled', 0)\n self.deviceStat['remote'] = None\n self.ui.remoteDevice.setStyleSheet(self.BACK_NORM)\n\n return True", "def is_channel_owner():\n\n async def check(ctx):\n if ctx.guild:\n owner = ctx.author == ctx.guild.owner\n if not owner:\n await ctx.send(\"I guess you are not this server's pogchamp. Bruh.\")\n return owner\n return True\n\n return commands.check(check)", "def tunnel_up(self):\n return self._ssh_host != None and self._ssh_port != None", "def test_connectivity(verbose=False):\n\n connectable = None\n time_out = 1 # Number of seconds for timeout\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n try:\n ssh.connect(env.host, timeout=time_out)\n connectable = True\n except paramiko.AuthenticationException:\n print(\"Error: On host = {Host}, Authentication problem during connectivity test\"\n .format(Host = env.host))\n connectable = False\n except socket.error, e:\n print(\"Error: On host = {Host}, Communication problem during connectivity test\"\n .format(Host = env.host))\n connectable = False\n ssh.close()\n\n if verbose:\n print(\"{Host:4} | connectable?: {Connectable}\".format(Host=env.host, Connectable=connectable))\n\n return connectable", "def check_client(self):\n self.current_state = \"No Network\"\n if self.esp_mgr.ap:\n if self.client_socket:\n # client exists\n self.current_state = \"Connected\"\n if self.client_socket.connected():\n if self.client_socket.available():\n data = self.client_socket.recv()\n if data:\n self._add_to_buffer(data)\n else:\n self._close_client()\n if time.monotonic() > self.test_connection:\n data = bytes([0])\n self.send_to_client(data)\n else:\n self._close_client()\n \n else:\n # check for new client\n self.current_state = \"Listening port 23\"\n # reset termious hack\n self.termious = None\n client_sock_num = self.esp_mgr.esp.socket_available(self.server_socket.socknum)\n if client_sock_num != adafruit_esp32spi_socket.NO_SOCKET_AVAIL:\n # new connection\n self.current_state = \"Connected\"\n self.test_connection = time.monotonic() + 5\n self.client_socket = 
adafruit_esp32spi_socket.socket(socknum=client_sock_num)\n \n self.send_telnet_command([telnet_IAC, telnet_cmd_codes['WONT'], telnet_opt_codes['Echo']])\n self.send_telnet_command([telnet_IAC, telnet_cmd_codes['WONT'], telnet_opt_codes['Suppress GA']])\n return self.current_state", "def failover_target(self) -> bool:\n return pulumi.get(self, \"failover_target\")", "def isInternal(self):\n\n\t\t# TODO optimization do we really need to look at the host attributes?\n\t\t# maybe we can just use the global attribute (faster)\n\t\tfe = self.newdb.getFrontendName()\n\t\tnetwork = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetwork')\n\t\tnetmask = self.newdb.getHostAttr(fe, 'Kickstart_PrivateNetmask')\n\n\t\t# Test based on our client's IP address.\n\t\twork = string.split(network, '.')\n\t\tmask = string.split(netmask, '.')\n\t\tip = string.split(self.clientList[-1], '.')\n\n\t\tfor i in range(0, len(ip)):\n\t\t\ta = int(ip[i]) & int(mask[i])\n\t\t\tb = int(work[i]) & int(mask[i])\n\n\t\t\tif a != b:\n\t\t\t\treturn 0\n\n\t\treturn 1", "def test_connection():\n result = run(\"uname -a\")\n if result.failed:\n _pretty_output(\"Could not connect to remote server. Please check your configuration\")\n abort(\"Cannot continue. Aborting...\")", "def is_agent_listening(self, host, port):\n try:\n rv = False\n url = \"http://%s:%s/\" % (host, port)\n response = self.client.get(url, timeout=0.8)\n\n server_header = response.headers[\"Server\"]\n if server_header == AGENT_HEADER:\n logger.debug(\"Host agent found on %s:%d\", host, port)\n rv = True\n else:\n logger.debug(\"...something is listening on %s:%d but it's not the Instana Host Agent: %s\",\n host, port, server_header)\n except (requests.ConnectTimeout, requests.ConnectionError):\n logger.debug(\"Instana Host Agent not found on %s:%d\", host, port)\n rv = False\n finally:\n return rv", "def identify_client(self,protocol):\n if protocol.resident:\n return protocol.peer\n #pdb.set_trace()", "def exists_remote(host, path):\n status = subprocess.call(\n ['ssh', host, 'test -f {}'.format(pipes.quote(path))])\n if status == 0:\n return True\n if status == 1:\n return False\n raise Exception('SSH failed')", "def am_I_master(self, ipdict):\n hostname = socket.gethostname()\n ip_address = socket.gethostbyname(hostname)\n return ipdict.get(ip_address).is_master" ]
[ "0.7902426", "0.7443142", "0.7258655", "0.7257651", "0.7235301", "0.6850371", "0.6576049", "0.645614", "0.6447849", "0.640186", "0.6281278", "0.61393785", "0.60623175", "0.60600775", "0.600218", "0.59922236", "0.59744495", "0.59468746", "0.593498", "0.5931949", "0.59191805", "0.59143037", "0.59089434", "0.58839273", "0.5878677", "0.58627546", "0.58582056", "0.5856519", "0.5856519", "0.58564866", "0.58432937", "0.58410126", "0.5838809", "0.583191", "0.5826208", "0.5816134", "0.58152354", "0.58046013", "0.5782135", "0.5782129", "0.57705116", "0.5765156", "0.5743786", "0.5728485", "0.571958", "0.5716849", "0.56998736", "0.56840235", "0.56814307", "0.5677561", "0.5664955", "0.5651018", "0.56395876", "0.5601876", "0.5601876", "0.5598907", "0.55947596", "0.5588096", "0.5578669", "0.55418146", "0.5526711", "0.5521167", "0.55202985", "0.55166036", "0.55118346", "0.5504049", "0.54706943", "0.54629683", "0.54617083", "0.54614854", "0.5455172", "0.5439557", "0.5439429", "0.54358286", "0.5408998", "0.5402391", "0.5397989", "0.5387672", "0.536917", "0.53657514", "0.53569907", "0.53502756", "0.5347977", "0.5347895", "0.53436166", "0.53347045", "0.5328848", "0.53286105", "0.53116304", "0.53090984", "0.53067935", "0.53024024", "0.53022647", "0.52983165", "0.5295837", "0.5291137", "0.52871734", "0.52858603", "0.5285811", "0.52853465" ]
0.7590158
1
Check if origin is remote client
def is_origin_remote(): return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE, SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False", "def is_remote(self):\n return False", "def is_remote(self): # -> Any | bool:\n ...", "def is_remote(self):\n raise NotImplementedError()", "def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True", "def remote(self):\r\n return self._url.scheme in ('http', 'https')", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))", "def is_local_client(self):\n return self.msg.is_local_client", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n return True", "def check_origin(self, origin):\n # import re\n # bool(re.match(r'^.*?\\.mydomain\\.com', origin))\n # allowed = super.check_origin(origin)\n if self.allow_origin == '*':\n return True\n\n host = self.request.headers.get(\"Host\")\n if origin is None:\n origin = self.request.headers.get(\"Origin\")\n\n # If no header is provided, assume we can't verify origin\n if origin is None:\n LOG.warning(\"user {0} Missing Origin header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n if host is None:\n LOG.warning(\"user {0} Missing Host header, rejecting WebSocket connection.\".format(self.client_id))\n return False\n\n origin = origin.lower()\n origin_host = urlparse(origin).netloc\n\n # OK if origin matches host\n if origin_host == host:\n return True\n\n # Check CORS headers\n if self.allow_origin:\n allow = self.allow_origin == origin\n # elif self.allow_origin_pat:\n # allow = bool(self.allow_origin_pat.match(origin))\n else:\n # No CORS headers deny the request\n allow = False\n if not allow:\n LOG.warning(\"user {0} Blocking Cross Origin WebSocket Attempt. Origin: %s, Host: %s\",\n self.client_id, origin, host)\n return allow", "def test_is_remote_source(self):\n self.assertEqual(self.project.is_remote(), False)", "def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) 
when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def test_connection(remote=False):\n import socket\n remote_server = 'www.google.com' if not remote else remote # TODO: maybe improve for China\n try:\n # does the host name resolve?\n host = socket.gethostbyname(remote_server)\n # can we establish a connection to the host name?\n con = socket.create_connection((host, 80), 2)\n return True\n except:\n print(\"Can't connect to a server...\")\n pass\n return False", "def _is_remote_branch(self, branch_reference):\n return branch_reference.startswith(\"refs/remotes/\")", "def fingertip_no_remote(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_remote\", False)", "def remote(self):\n return self.getItunesAttribute('Track Type') == 'Remote'", "def is_host(self):\n return self.host", "def _is_remote_reusable(inputs, calculation):\n can_use_remote = False\n #If no charge density file is available to restart from the calculation will except\n #with a not nice error message. So we can only reuse the charge density if these files are available\n retrieved_filenames = calculation.base.links.get_outgoing().get_node_by_label('retrieved').list_object_names()\n if any(file in retrieved_filenames for file in (\n 'cdn_last.hdf',\n 'cdn1',\n )):\n can_use_remote = True\n\n if 'fleurinp' in inputs:\n modes = inputs.fleurinp.get_fleur_modes()\n if modes['force_theorem'] or modes['dos'] or modes['band']:\n # in modes listed above it makes no sense copying cdn.hdf\n can_use_remote = False\n # without fleurinp it is harder to extract modes in this case\n # - simply try to reuse cdn.hdf and hope it works\n\n return can_use_remote", "def is_remote(state: 'JobState') -> bool:\n return state in [\n JobState.WAITING, JobState.WAITING_CR, JobState.RUNNING,\n JobState.RUNNING_CR\n ]", "def is_remote_access_allowed(self, path: str):\n return self.public_path_marker.test(path) or self.is_public(path) and not self.is_private(path)", "def is_client(self):\n if not hasattr(self, '_is_client'):\n self._is_client = hasattr(self, 'client')\n return self._is_client", "def IsRemoteRerun(self):\n return self.IsRerun() and not self.IsLocalRerun()", "def is_central_server() -> bool:\n return hasattr(Config().algorithm,\n 'cross_silo') and Config().args.port is None", "def has_upstream_server(self) -> bool:\n return True if self.host is not None else False", "def remote_publishing_master():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'MASTER'", "def valid_origin(self, parsed_origin):\n # None is not allowed unless all hosts are allowed\n if parsed_origin is None and \"*\" not in self.allowed_origins:\n return False\n return self.validate_origin(parsed_origin)", "def _is_self(self, ip, port):\n import socket as sk\n self_ip = sk.gethostbyname(sk.gethostname())\n self_port = self.config['API_PORT']\n return str(self_ip) == ip and self_port == port", "def getRemoteHost():", "async def is_server_live(self, headers: dict[str, t.Any] = ...) 
-> bool:", "def is_remote_session():\n return os.environ.get('SSH_TTY', os.environ.get('SSH_CONNECTION'))", "def check_if_same_host(host, url):\n # print '\\nchecking same origin:', host, get_host_name(url)\n\n if host == get_host_name(url):\n return True\n return False", "def _client_allowed(self):\r\n client_ip = self._client_address[0]\r\n if not client_ip in self._settings.allowed_clients and \\\r\n not 'ALL' in self._settings.allowed_clients:\r\n self._send_content('Access from host %s forbidden.' % client_ip, 'text/html')\r\n return False\r\n return True", "def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)", "def use_remote_gateways(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")", "def use_remote_gateways(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")", "def is_controlled(self):\n return False if self._remote_controller == \"\" else True", "def remote():\n pass", "def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False", "def remote(self):\n return self.client_address", "def is_node_origin(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"_origin\"", "def isTrustProxy(self):\n pass", "def detect_origin(self) -> typing.Optional[str]:\n origin: typing.Optional[str] = self.headers.get(\"Origin\")\n if not origin or \"://\" not in origin:\n return None\n\n _, origin_host = origin.split(\"://\", 1)\n if \":\" in origin_host:\n origin_host, _ = origin_host.split(\":\")\n\n search = self.path.replace(\"/\", \"\")\n if search.endswith(\".js\"):\n search = search[:-3]\n\n if origin and self.path and origin_host == search:\n return origin\n else:\n return None", "def is_connected(self):\n if self.server: return True\n return False", "def is_cups_server(rm):\n try:\n s = socket.socket()\n s.settimeout(0.3)\n s.connect((rm, 631))\n s.close()\n\n return True\n except (socket.error, socket.timeout):\n return False", "def proxy_check(self, proxy):", "def can_proxy_restclient(request, service, url):\n if not hasattr(request, \"can_proxy_restclient\"):\n request.can_proxy_restclient = is_admin()\n return request.can_proxy_restclient", "def access_control_allow_credentials(self):\n return \"Access-Control-Allow-Credentials\" in self.headers", "def check_https_get_remote_ip():\n if not (cherrypy.request.scheme == \"https\" or cherrypy.request.headers.get(\"x-forwarded-proto\") == \"https\"):\n cherrypy.log.error(\"slycat-standard-authentication.py authenticate\",\n \"cherrypy.HTTPError 403 secure connection required.\")\n raise cherrypy.HTTPError(\"403 Secure connection required.\")\n return cherrypy.request.headers.get(\n \"x-forwarded-for\") if \"x-forwarded-for\" in cherrypy.request.headers else cherrypy.request.rem", "def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False", "def is_connected_to(self, receiver: SkupperSite) -> bool:\n return receiver in self.connected_sites", "def 
isLocal(self, connectionInfo):\n return False", "def _has_cors_header(self):\n return \"Access-Control-Request-Method\" in self.headers or \"Access-Control-Request-Headers\" in self.headers or \"Origin\" in self.headers", "def has_client(self, ip):\n for cli in self.clients:\n if cli.ip == ip:\n return cli\n return None", "def has_client(self):\n \n return len(self._clients) > 0", "def has_client(self):\n \n return len(self._clients) > 0", "def _check_connection() -> bool:\n return bool(subprocess.check_output([\"hostname\", \"-I\"]))", "def use_remote_gateways(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"use_remote_gateways\")", "def isconnected(self) -> bool:", "def test_is_remote_target(self):\n self.site.mode = SITE_MODE_SOURCE\n self.site.save()\n self.assertEqual(self.project.is_remote(), True)", "def isConnected():", "def same_origin(url1, url2):\n p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)\n return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)", "def test_z_remote_command(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport subprocess\n\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"check_nrpe\"])\n\t\t\tif (str(\"/check_nrpe\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\t\ttry:\n\t\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"ssh\"])\n\t\t\t\tif (str(\"/ssh\") in str(theOutputtext)):\n\t\t\t\t\ttheResult = True\n\t\t\texcept Exception:\n\t\t\t\ttheResult = False\n\t\tassert theResult", "def external(self) -> bool:\n return getattr(self.args, 'external', False) or self.is_sandbox", "def test_is_url_from_local_instance_returns_true_if_url_is_from_local_instance(\n self,\n ):\n # Arrange / Act\n return_value = BlobDownloader(\n f\"{settings.SERVER_URI}/987653456789\"\n ).is_url_from_local_instance()\n # Assert\n self.assertEqual(return_value, True)", "def is_local_res(\n url: str,\n resource_netloc: str,\n page_netlock: str,\n) -> bool:\n is_encoded = 'data:' in url\n is_local = not resource_netloc or resource_netloc == page_netlock\n return is_local and not is_encoded", "def is_test_site(request_object):\n if (request_object is None or\n request_object.META is None or\n (not request_object)):\n # happens on errors,\n # Should always do what the live site does in case of error.\n return False\n\n # Get current server name for this instance.\n # Could be the live server, test server, or local server\n # the local server_name changes depending on where it's accessed from.\n server_name = request_object.META['SERVER_NAME']\n \n return (server_name.startswith('test.') or # remote test site\n (server_name in settings.INTERNAL_IPS)) # local dev", "def remote_publishing_slave():\n return remote_publishing() and hasattr(settings, 'NEWS_REMOTE_ROLE') \\\n and settings.NEWS_REMOTE_ROLE is 'SLAVE'", "def is_connected(self):\r\n return self.__socket is not None", "def in_host():\n return not in_docker()", "def is_directly_updatable(credentials: Credentials) -> bool:\n if credentials.base_url == QE_URL:\n return True\n\n if credentials.base_url in (QCONSOLE_URL, QE2_URL, QCONSOLE2_URL):\n if credentials.base_url == credentials.url:\n return True\n\n return False", "def is_connected(self) -> bool:", "def nremote(self):", "def validate_origin(self, parsed_origin):\n return any(\n pattern == \"*\" or self.match_allowed_origin(parsed_origin, pattern)\n for pattern in self.allowed_origins\n )", "def net_check():\n resp = None\n host = 
\"https://gitlab.manjaro.org\"\n # noinspection PyBroadException\n try:\n resp = urllib.request.urlopen(host, timeout=2)\n except Exception:\n pass\n return bool(resp)", "def is_dialing(self) -> bool:", "def local(self):\r\n return self._url.scheme in ('', 'file')", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def is_connected(self):\n return self._current_protocol is not None", "def isconnected(self) -> bool:\n ...", "def is_localhost() -> bool:\n if not has_request_context():\n return True\n\n host = request.headers.get(\"Host\", \"\")\n return host.startswith((\"localhost:\", \"127.0.0.1:\", \"0.0.0.0:\"))", "def pg_is_local(self) -> bool:\n query = queries.get(\"get_pga_inet_addresses\")\n ret = pg.fetchone(self.pg_conn, query)\n if ret[\"inet_server_addr\"] == ret[\"inet_client_addr\"]:\n return True\n return False", "def is_internal_relay(request, public_key):\n if (settings.DEBUG or\n request.META.get('REMOTE_ADDR', None) in settings.INTERNAL_IPS or\n public_key in settings.SENTRY_RELAY_WHITELIST_PK):\n return True\n return False", "def local_network_check():\n return (\n network.show_active() in LOCAL_BLOCKCHAIN_ENVINROMENTS\n or network.show_active() in FORKED_LOCAL_ENVIRONMENTS\n )", "def is_connected(self):\n return self.socket is not None and self.socket.connected and super(WebsocketTransport, self).is_connected()", "def _check_cors_headers(self, res):\r\n self.assertEqual(res.headers['access-control-allow-origin'], '*')\r\n self.assertEqual(\r\n res.headers['access-control-allow-headers'], 'X-Requested-With')", "def _check_cors_headers(self, res):\r\n self.assertEqual(res.headers['access-control-allow-origin'], '*')\r\n self.assertEqual(\r\n res.headers['access-control-allow-headers'], 'X-Requested-With')", "def is_connected(self):\n return self.hub.is_connected and self.client.is_running", "def is_connected(self):\n return False", "def allow_origins(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"allow_origins\")", "def is_connected(self) -> bool:\n return False if self._snitun is None else self._snitun.is_connected", "def check_connection(self):\n return False", "def authenticated(self):\n client_token = self.get_cookie(\"PA-client-token\")\n if not client_token:\n print(\"no cookie\")\n return False\n\n headers = cherrypy.request.headers\n if \"Remote-Addr\" not in headers:\n print(\"no IP\")\n return False\n\n to_hash = \"Python-Aboard \" + headers.get(\"Remote-Addr\", \"none\")\n to_hash += \" \" + headers.get(\"User-Agent\", \"unknown\")\n to_hash = to_hash.encode()\n token = hashlib.sha256(to_hash).digest()\n return client == client_token", "def is_connected(self):\n return self._socket is not None", "def is_connected(cls,socket):\n pass" ]
[ "0.79242504", "0.7800007", "0.7686724", "0.75528294", "0.747566", "0.7285064", "0.70139563", "0.6977031", "0.6942172", "0.6772315", "0.6772315", "0.6772315", "0.6772315", "0.67488825", "0.6476083", "0.6462293", "0.64356977", "0.63294023", "0.6260433", "0.6211482", "0.62091947", "0.6205737", "0.61427885", "0.61377716", "0.610265", "0.6073228", "0.60689443", "0.606794", "0.60434717", "0.6035247", "0.5999749", "0.5983107", "0.5971519", "0.5958124", "0.59029025", "0.5891216", "0.58899707", "0.58766335", "0.5861787", "0.58544374", "0.58544374", "0.5840889", "0.579721", "0.57884115", "0.5784865", "0.5782483", "0.577615", "0.5773404", "0.5750169", "0.57290995", "0.57281303", "0.571109", "0.5710947", "0.57049155", "0.56987226", "0.56953645", "0.5685941", "0.5685302", "0.5683728", "0.56728023", "0.56728023", "0.56628907", "0.5642296", "0.5628929", "0.5626361", "0.56117666", "0.56018066", "0.5594426", "0.5587504", "0.5559475", "0.5559226", "0.5549594", "0.5549427", "0.55449694", "0.5543099", "0.55211765", "0.5515424", "0.549584", "0.54808927", "0.54671955", "0.5465016", "0.5458123", "0.5453931", "0.54490876", "0.544512", "0.5444482", "0.54382217", "0.54303634", "0.5425872", "0.5417817", "0.5414913", "0.5414913", "0.5410436", "0.5406912", "0.53999245", "0.53933877", "0.53918135", "0.5391749", "0.53875166", "0.5384499" ]
0.8189138
0
Check if sync mode is import
def is_import(): return sync_mode in (SyncMode.IMPORT_LOCAL, SyncMode.IMPORT_REMOTE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def is_import(self):\n return self.sh_info is None and (self.binding == 'STB_GLOBAL' or \\\n self.binding == 'STB_WEAK' or \\\n self.binding == 'STT_FUNC')", "def import_only(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"import_only\")", "def import_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"import_only\")", "def get_import_mode(self):\n\t\treturn self.buttonImport.get_active()", "def is_dump():\n return sync_mode in (SyncMode.DUMP_LOCAL, SyncMode.DUMP_REMOTE)", "def is_import_completion(self):\n current_line = self.get_current_line()\n\n # Seperate cases! More difficult than I thought\n match = re.match(r\"(import)|(from)\", current_line)\n if match:\n word_before = self.get_word_before()\n if word_before == \"from\" or word_before == \"import\":\n # Need to check for multiple imports! 
(TODO)\n return True\n\n return False", "def is_import_from_completion(self):\n\n current_line = self.get_current_line()\n\n match = re.match(r\"from .* import\", current_line)\n if match and self.get_word() != \"import\":\n return True\n\n return False", "def isSync(self):\n return False", "def detect_import(self):\n if self.contains_match(CONTAINS_IMPORT): self.es6import = True\n elif self.contains_match(CONTAINS_REQUIRE): self.es6import = False\n else: self.es6import = self.get_project_pref('detect_prefer_imports')", "def checkImport(self):\r\n for imp in self.cap_file.Import.packages:\r\n if a2s(imp.aid) not in export_refs:\r\n return False\r\n return True", "def checkIfImport():\n instance_ipath, product_ipath = getImportedPathes()\n product_ilist = [i for i in os.listdir(product_ipath) \\\n if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]\n if product_ilist:\n return 1\n return 0", "def imports(self):\n line = self.line.strip()\n if line.startswith('im'):\n if line.startswith('import') is False:\n return True\n elif line == '':\n return True", "def auto_import(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_import\")", "def auto_import(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_import\")", "def auto_import(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_import\")", "def set_import_mode(self, flag):\n\t\tif self.buttonImport.get_active() and not flag:\n\t\t\tself.buttonImport.set_active(False)\n\t\t\treturn True\n\t\telif not self.buttonImport.get_active() and flag:\n\t\t\tself.buttonImport.set_active(True)\n\t\t\treturn True\n\t\treturn False", "def is_migrated_before():\n\n global migration_sign\n if os.path.exists(migration_sign):\n return True\n else:\n return False", "def has_import_permission(self, request):\n opts = self.opts\n codename = get_permission_codename('import', opts)\n return request.user.has_perm(\"%s.%s\" % (opts.app_label, codename))", "def get_sync_mode():\n return sync_mode", "def get_auto_start_import(self):\n\t\treturn self.checkAutoStartImport.get_active()", "def is_already_import_function(self, fn):\n if isinstance(fn, basestring):\n for descriptor in self.import_entries:\n for import_element in descriptor.imports:\n if import_element.name == fn:\n return True\n\n elif isinstance(fn, int):\n # TODO : add ordinary import\n pass\n return False", "def is_first_synced(self):\n return True", "def is_imported():\n return len(inspect.stack()) > 3", "def isLocal(self, connectionInfo):\n return False", "def is_func_imported(self, ea):\n # If address is located in IAT\n if ea in self.rt_import_table:\n return True\n\n return False", "def imported(module):\n try:\n if module not in sys.modules:\n __import__(module)\n return 'enabled'\n except:\n return '-'", "async def test_import_exist(hass):\n mocked_device = _create_mocked_device()\n _create_mock_config_entry(hass)\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()", "def is_local(self) -> bool:\n if not self.source:\n return False\n\n if self.source.master_name.startswith(MODULE_NAME):\n return True\n\n if self.is_type_defs():\n return True\n\n return False", "def 
_local_install(self):\n config = self._config\n ext = config.plugins[self.full_name].get('pkg_extension', '')\n if not ext:\n return False\n\n # ensure extension begins with a dot\n ext = '.{0}'.format(ext.lstrip('.'))\n\n return config.context.package.arg.endswith(ext)", "def allow_syncdb(self, db, model):\n return True", "def allow_syncdb(self, db, model):\n return True", "def upload_only_when_stable(self):\n return os.getenv(\"CONAN_UPLOAD_ONLY_WHEN_STABLE\", \"True\").lower() in [\"true\", \"1\", \"yes\"]", "def IsSynchronized(self) -> bool:", "def can_import(name):\n try:\n __import__(name)\n return True\n except ImportError:\n return False", "def get_import_status(self):\n return AsyncResult(self.import_task_id).state", "def getSyncInfo (self, connection) :\r\n \r\n if self.state != 'valid' :\r\n return False\r\n \r\n if self.sync_target :\r\n return False\r\n \r\n self.state = 'recv_sync'\r\n self.sync_target = connection\r\n self.do_sync_get()\r\n \r\n return True", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def is_export(self):\n return self.sh_info is not None and (self.binding == 'STB_GLOBAL' or \\\n self.binding == 'STB_WEAK')", "def check_enable_mode(self, *args, **kwargs):\n pass", "def supports_ordinary_make_module_imports(self):\n return True", "def check_config_mode(self):\n return False", "def testImport(self):\n success = False\n try:\n from cutlass import DiseaseMeta\n success = True\n except:\n pass\n\n self.failUnless(success)\n self.failIf(DiseaseMeta is None)", "def test_is_not_local_dev_from_presence(self):\n\n expected = False\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)", "def getSyncFor (self, conn) :\r\n for pw, _conn in self.clients :\r\n if _conn and _conn.getSyncInfo(conn) :\r\n self.ongoing_sync_count += 1\r\n return True\r\n \r\n return False", "def _test_context_changed(self, request, import_):\n #\n reprocessing_triggers = (\"category\", \"protocol\", \"file_format\")\n\n for key in reprocessing_triggers:\n if key in request.data and request.data[key] != getattr(import_, key):\n return True\n return False", "def is_delta_sync_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"delta_sync_enabled\"]\n except KeyError:\n return False", "def is_unreal():\n\n try:\n import unreal\n except ImportError:\n return False\n\n return True", "def in_maintenance_mode():\n return os.path.exists(\"maintenance.txt\")", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def check(self, context):\r\n return context.config.preset is not None", "def is_already_import_dll(self, dll_name):\n for descriptor in self.import_entries:\n if descriptor.dll == dll_name:\n return True\n return False", "def _check_trigger_sync(pre_image, image):\n return pre_image.status in ('saving', 'queued') and image.size and \\\n [l for l in image.locations if not utils.is_glance_location(l['url'])]", "def test_is_not_local_dev(self):\n\n expected = False\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)", "def test_is_local_dev(self):\n\n expected = True\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)", "def check_gpsync_running(options):\n \n return gp.getSyncmasterPID('localhost', options.master_data_dir) > 0", "def allow_syncdb(self, db, model):\n\n return True", "def _import_module(self, name):\r\n 
try:\r\n __import__(name)\r\n return True\r\n except ImportError:\r\n return False", "def set_auto_start_import(self, flag):\n\t\tself.checkAutoStartImport.set_active(flag)", "def hasStartSyncReceived(self):\r\n\r\n return self.receiver.hasStartSyncReceived()", "def available(self):\n return not os.path.exists(self.lockfile)", "def needs_sync(self):\n\n affected_attributes = [\n 'css_files', 'js_files',\n 'scss_files', 'widgets']\n\n for attr in affected_attributes:\n if len(getattr(self, attr)) > 0:\n return True\n return False", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def reimport_eligible(self) -> bool:\n return pulumi.get(self, \"reimport_eligible\")", "def is_profile_sync(self, profile: Profile, raise_if_not_sync=False):\n try:\n linked_pi_list = [ip.identifier for ip in profile.list_linked_packages()]\n needed_pi_list = [ip.identifier for ip in self.get_profile_dependencies(profile)]\n for pi in needed_pi_list:\n if pi not in linked_pi_list:\n raise LeafException(\"Missing package link for {pi}\".format(pi=pi))\n for pi in linked_pi_list:\n if pi not in needed_pi_list:\n raise LeafException(\"Package should not be linked: {pi}\".format(pi=pi))\n except Exception as e:\n if raise_if_not_sync:\n raise ProfileOutOfSyncException(profile, cause=e)\n self.logger.print_verbose(str(e))\n return False\n return True", "def on_check_auto_start_import_toggled(self, checkBox):\n\t\tself.emit('auto-start-import-changed')", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def is_loaded(self):\n return os.path.exists(IPMIService.IPMI_DEV)", "async def async_step_import(self, import_info: dict[str, Any]) -> FlowResult:\n import_info.pop(CONF_MONITORED_CONDITIONS, None)\n import_info.pop(CONF_NICS, None)\n import_info.pop(CONF_DRIVES, None)\n import_info.pop(CONF_VOLUMES, None)\n return await self.async_step_user(import_info)", "def enabled(cls):\n return (cls is not Extension)", "def _try_import(self, import_fxn, src, exc):\n try:\n p, v, o = import_fxn(src)\n except exc:\n return False\n\n self.project = p\n self.version = v\n self.objects = o\n\n return True", "def is_local(self):\n try:\n return os.path.isfile(self.get_absolute_path())\n except ValueError:\n logger.error(\"'%s' is not a file\", self.get_absolute_path())\n except TypeError: # no datafile available or file does not exist\n pass\n return False", "def workflow_loaded(self):\n return bool(not self._gdb_interface.empty())", "def is_system(self) -> bool:", "def __bool__(self):\n return self.installed", "def has_set_up_py_in(self):\n return (self.version_info >= (4, 10))", "def ezimport_ln_s(self) -> bool:\n\n cli = CLI()\n cli.register('import', ImportControl, '_')\n cli.register('sessions', SessionsControl, '_')\n cli.invoke(['import',\n '-k', self.conn.getSession().getUuid().val,\n '-s', self.conn.host,\n '-p', str(self.conn.port),\n '--transfer', 'ln_s',\n str(self.file_path)])\n if cli.rv == 0:\n self.imported = True\n print(f'Imported {self.file_path}')\n return True\n else:\n logging.error(f'Import of {self.file_path} has failed!')\n return False", "def is_mod(self) -> bool:\n if self._mod == 1:\n return True\n if self.channel.name == self.display_name.lower():\n return True\n else:\n return False", "def 
test_importtleCommandExists(self):\n self.assertIn('importtle', get_commands())", "def check_enable_mode(self, check_string='#'):\n return True", "def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def isLoaded(self,modFile):\n return (modFile in self.loadFiles)", "async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:\n conf = config.get(DOMAIN)\n hass.data.setdefault(DOMAIN, {})\n\n if not conf:\n return True\n\n hass.async_create_task(\n hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=conf\n )\n )\n return True", "def is_exporting(self):\n return self.get_json_active_thread_count() > 0", "def is_synchronized(self):\r\n sync_state = True\r\n \r\n for particle in self.population:\r\n sync_state = (sync_state and particle.sync)\r\n \r\n if not sync_state:\r\n break;\r\n \r\n return sync_state", "async def casino_is_global(self):\n return await self.db.Settings.Global()", "def _check_for_sync(self, fl_name):\n fl_sync = True\n # Get the list of flavors names to sync.\n fl_wlist = self.get_flavors_white_list()\n fl_blist = self.get_flavors_black_list()\n\n if (len(fl_wlist) != 0):\n fl_sync = self._regex_comp(fl_name, fl_wlist)\n if (fl_sync and (len(fl_blist) != 0)):\n fl_sync = not(self._regex_comp(fl_name, fl_blist))\n return fl_sync", "def is_local(self):\n if not \"COLLABORATIVE\" in self._file.upper():\n LOGGER.debug(['AIE4606', 'match_false'], {'file': self._file})\n return True\n else:\n LOGGER.debug(['AIE4607', 'match_true'], {'file': self._file})\n return False\n return self._is_local", "def _IsDjangoProject(self):\r\n \r\n manage_file, settings_file = self._FindKeyFiles()\r\n if manage_file is None or settings_file is None:\r\n return False\r\n \r\n return True", "def _global_development_mode() -> bool:\n return (\n not env_util.is_pex()\n and \"site-packages\" not in __file__\n and \"dist-packages\" not in __file__\n and \"__pypackages__\" not in __file__\n )", "def _pre_sync(self):", "async def test_import_invalid(hass):\n mocked_device = _create_mocked_device(True)\n _create_mock_config_entry(hass)\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"connection\"\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()", "def is_setup_connected(self):\n return bool(self.get_target_namespace())", "def determine_should_sync(\n self, src_file: Optional[FileStats], dest_file: Optional[FileStats]\n ) -> bool:\n if dest_file:\n dest_file.operation_name = \"delete\"\n LOGGER.debug(\n \"syncing: (None) -> %s (remove), file does not \"\n \"exist at source (%s) and delete mode enabled\",\n dest_file.src if dest_file else None,\n dest_file.dest if dest_file else None,\n )\n return True", "def is_loaded_module(self, module_name):\n\n for (module, ea, name, ord) in self.rt_import_table.values():\n if module == module_name:\n return True\n return False", "def is_managed(self):\n return getattr(self.local, 'managed', False)", "def sync(type, all):\n print(\"Syncing\")", "def is_discord_file(obj):\n return (obj.__class__.__name__) == \"File\"", "def isTx(self):\n\t\treturn self.extension == '.tx'" ]
[ "0.75585586", "0.69149435", "0.665945", "0.64593935", "0.6442651", "0.63151574", "0.63039666", "0.6274873", "0.6236776", "0.6194281", "0.6192882", "0.6032196", "0.59927666", "0.59285295", "0.59285295", "0.59168273", "0.5914921", "0.58185005", "0.57003963", "0.5684882", "0.56790406", "0.56752753", "0.5652213", "0.55742455", "0.5552539", "0.5551295", "0.5519939", "0.5516505", "0.5488701", "0.5475263", "0.5450379", "0.5450379", "0.54391646", "0.53962195", "0.53884214", "0.538781", "0.5386145", "0.5358668", "0.5340932", "0.533837", "0.53283596", "0.5321225", "0.5308533", "0.5307136", "0.52977836", "0.5297199", "0.52932847", "0.5291189", "0.5269965", "0.52684015", "0.5267621", "0.5256897", "0.52550626", "0.5247674", "0.523908", "0.5234402", "0.52315897", "0.52179563", "0.5212075", "0.5211245", "0.52060485", "0.51905113", "0.51826376", "0.5182336", "0.5182152", "0.51801735", "0.5159473", "0.5159473", "0.5151841", "0.5140254", "0.5138527", "0.51362157", "0.5134738", "0.5131407", "0.5131198", "0.51290756", "0.512328", "0.51211494", "0.51085824", "0.5105414", "0.5102252", "0.5102201", "0.50924295", "0.5080459", "0.50679964", "0.50621086", "0.5059412", "0.50577724", "0.50567234", "0.5038386", "0.50351137", "0.50281715", "0.502762", "0.5023896", "0.50225425", "0.5020108", "0.5019279", "0.5018155", "0.50111026", "0.5010759" ]
0.9084145
0
Check if sync mode is import
def is_dump():
    return sync_mode in (SyncMode.DUMP_LOCAL, SyncMode.DUMP_REMOTE)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_import():\n return sync_mode in (SyncMode.IMPORT_LOCAL, SyncMode.IMPORT_REMOTE)", "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def is_import(self):\n return self.sh_info is None and (self.binding == 'STB_GLOBAL' or \\\n self.binding == 'STB_WEAK' or \\\n self.binding == 'STT_FUNC')", "def import_only(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"import_only\")", "def import_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"import_only\")", "def get_import_mode(self):\n\t\treturn self.buttonImport.get_active()", "def is_import_completion(self):\n current_line = self.get_current_line()\n\n # Seperate cases! More difficult than I thought\n match = re.match(r\"(import)|(from)\", current_line)\n if match:\n word_before = self.get_word_before()\n if word_before == \"from\" or word_before == \"import\":\n # Need to check for multiple imports! 
(TODO)\n return True\n\n return False", "def is_import_from_completion(self):\n\n current_line = self.get_current_line()\n\n match = re.match(r\"from .* import\", current_line)\n if match and self.get_word() != \"import\":\n return True\n\n return False", "def isSync(self):\n return False", "def detect_import(self):\n if self.contains_match(CONTAINS_IMPORT): self.es6import = True\n elif self.contains_match(CONTAINS_REQUIRE): self.es6import = False\n else: self.es6import = self.get_project_pref('detect_prefer_imports')", "def checkImport(self):\r\n for imp in self.cap_file.Import.packages:\r\n if a2s(imp.aid) not in export_refs:\r\n return False\r\n return True", "def checkIfImport():\n instance_ipath, product_ipath = getImportedPathes()\n product_ilist = [i for i in os.listdir(product_ipath) \\\n if osp.isfile(osp.join(product_ipath,i)) and i.endswith('.zexp')]\n if product_ilist:\n return 1\n return 0", "def imports(self):\n line = self.line.strip()\n if line.startswith('im'):\n if line.startswith('import') is False:\n return True\n elif line == '':\n return True", "def auto_import(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_import\")", "def auto_import(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_import\")", "def auto_import(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_import\")", "def set_import_mode(self, flag):\n\t\tif self.buttonImport.get_active() and not flag:\n\t\t\tself.buttonImport.set_active(False)\n\t\t\treturn True\n\t\telif not self.buttonImport.get_active() and flag:\n\t\t\tself.buttonImport.set_active(True)\n\t\t\treturn True\n\t\treturn False", "def is_migrated_before():\n\n global migration_sign\n if os.path.exists(migration_sign):\n return True\n else:\n return False", "def has_import_permission(self, request):\n opts = self.opts\n codename = get_permission_codename('import', opts)\n return request.user.has_perm(\"%s.%s\" % (opts.app_label, codename))", "def get_sync_mode():\n return sync_mode", "def get_auto_start_import(self):\n\t\treturn self.checkAutoStartImport.get_active()", "def is_already_import_function(self, fn):\n if isinstance(fn, basestring):\n for descriptor in self.import_entries:\n for import_element in descriptor.imports:\n if import_element.name == fn:\n return True\n\n elif isinstance(fn, int):\n # TODO : add ordinary import\n pass\n return False", "def is_first_synced(self):\n return True", "def is_imported():\n return len(inspect.stack()) > 3", "def isLocal(self, connectionInfo):\n return False", "def is_func_imported(self, ea):\n # If address is located in IAT\n if ea in self.rt_import_table:\n return True\n\n return False", "def imported(module):\n try:\n if module not in sys.modules:\n __import__(module)\n return 'enabled'\n except:\n return '-'", "async def test_import_exist(hass):\n mocked_device = _create_mocked_device()\n _create_mock_config_entry(hass)\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"already_configured\"\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()", "def is_local(self) -> bool:\n if not self.source:\n return False\n\n if self.source.master_name.startswith(MODULE_NAME):\n return True\n\n if self.is_type_defs():\n return True\n\n return False", "def 
_local_install(self):\n config = self._config\n ext = config.plugins[self.full_name].get('pkg_extension', '')\n if not ext:\n return False\n\n # ensure extension begins with a dot\n ext = '.{0}'.format(ext.lstrip('.'))\n\n return config.context.package.arg.endswith(ext)", "def allow_syncdb(self, db, model):\n return True", "def allow_syncdb(self, db, model):\n return True", "def upload_only_when_stable(self):\n return os.getenv(\"CONAN_UPLOAD_ONLY_WHEN_STABLE\", \"True\").lower() in [\"true\", \"1\", \"yes\"]", "def IsSynchronized(self) -> bool:", "def can_import(name):\n try:\n __import__(name)\n return True\n except ImportError:\n return False", "def get_import_status(self):\n return AsyncResult(self.import_task_id).state", "def getSyncInfo (self, connection) :\r\n \r\n if self.state != 'valid' :\r\n return False\r\n \r\n if self.sync_target :\r\n return False\r\n \r\n self.state = 'recv_sync'\r\n self.sync_target = connection\r\n self.do_sync_get()\r\n \r\n return True", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def is_export(self):\n return self.sh_info is not None and (self.binding == 'STB_GLOBAL' or \\\n self.binding == 'STB_WEAK')", "def check_enable_mode(self, *args, **kwargs):\n pass", "def supports_ordinary_make_module_imports(self):\n return True", "def check_config_mode(self):\n return False", "def testImport(self):\n success = False\n try:\n from cutlass import DiseaseMeta\n success = True\n except:\n pass\n\n self.failUnless(success)\n self.failIf(DiseaseMeta is None)", "def test_is_not_local_dev_from_presence(self):\n\n expected = False\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)", "def getSyncFor (self, conn) :\r\n for pw, _conn in self.clients :\r\n if _conn and _conn.getSyncInfo(conn) :\r\n self.ongoing_sync_count += 1\r\n return True\r\n \r\n return False", "def _test_context_changed(self, request, import_):\n #\n reprocessing_triggers = (\"category\", \"protocol\", \"file_format\")\n\n for key in reprocessing_triggers:\n if key in request.data and request.data[key] != getattr(import_, key):\n return True\n return False", "def is_delta_sync_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"delta_sync_enabled\"]\n except KeyError:\n return False", "def is_unreal():\n\n try:\n import unreal\n except ImportError:\n return False\n\n return True", "def in_maintenance_mode():\n return os.path.exists(\"maintenance.txt\")", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def check(self, context):\r\n return context.config.preset is not None", "def is_already_import_dll(self, dll_name):\n for descriptor in self.import_entries:\n if descriptor.dll == dll_name:\n return True\n return False", "def _check_trigger_sync(pre_image, image):\n return pre_image.status in ('saving', 'queued') and image.size and \\\n [l for l in image.locations if not utils.is_glance_location(l['url'])]", "def test_is_not_local_dev(self):\n\n expected = False\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)", "def test_is_local_dev(self):\n\n expected = True\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)", "def check_gpsync_running(options):\n \n return gp.getSyncmasterPID('localhost', options.master_data_dir) > 0", "def allow_syncdb(self, db, model):\n\n return True", "def _import_module(self, name):\r\n 
try:\r\n __import__(name)\r\n return True\r\n except ImportError:\r\n return False", "def set_auto_start_import(self, flag):\n\t\tself.checkAutoStartImport.set_active(flag)", "def hasStartSyncReceived(self):\r\n\r\n return self.receiver.hasStartSyncReceived()", "def available(self):\n return not os.path.exists(self.lockfile)", "def needs_sync(self):\n\n affected_attributes = [\n 'css_files', 'js_files',\n 'scss_files', 'widgets']\n\n for attr in affected_attributes:\n if len(getattr(self, attr)) > 0:\n return True\n return False", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def reimport_eligible(self) -> bool:\n return pulumi.get(self, \"reimport_eligible\")", "def is_profile_sync(self, profile: Profile, raise_if_not_sync=False):\n try:\n linked_pi_list = [ip.identifier for ip in profile.list_linked_packages()]\n needed_pi_list = [ip.identifier for ip in self.get_profile_dependencies(profile)]\n for pi in needed_pi_list:\n if pi not in linked_pi_list:\n raise LeafException(\"Missing package link for {pi}\".format(pi=pi))\n for pi in linked_pi_list:\n if pi not in needed_pi_list:\n raise LeafException(\"Package should not be linked: {pi}\".format(pi=pi))\n except Exception as e:\n if raise_if_not_sync:\n raise ProfileOutOfSyncException(profile, cause=e)\n self.logger.print_verbose(str(e))\n return False\n return True", "def on_check_auto_start_import_toggled(self, checkBox):\n\t\tself.emit('auto-start-import-changed')", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def is_loaded(self):\n return os.path.exists(IPMIService.IPMI_DEV)", "async def async_step_import(self, import_info: dict[str, Any]) -> FlowResult:\n import_info.pop(CONF_MONITORED_CONDITIONS, None)\n import_info.pop(CONF_NICS, None)\n import_info.pop(CONF_DRIVES, None)\n import_info.pop(CONF_VOLUMES, None)\n return await self.async_step_user(import_info)", "def enabled(cls):\n return (cls is not Extension)", "def _try_import(self, import_fxn, src, exc):\n try:\n p, v, o = import_fxn(src)\n except exc:\n return False\n\n self.project = p\n self.version = v\n self.objects = o\n\n return True", "def is_local(self):\n try:\n return os.path.isfile(self.get_absolute_path())\n except ValueError:\n logger.error(\"'%s' is not a file\", self.get_absolute_path())\n except TypeError: # no datafile available or file does not exist\n pass\n return False", "def workflow_loaded(self):\n return bool(not self._gdb_interface.empty())", "def is_system(self) -> bool:", "def __bool__(self):\n return self.installed", "def has_set_up_py_in(self):\n return (self.version_info >= (4, 10))", "def ezimport_ln_s(self) -> bool:\n\n cli = CLI()\n cli.register('import', ImportControl, '_')\n cli.register('sessions', SessionsControl, '_')\n cli.invoke(['import',\n '-k', self.conn.getSession().getUuid().val,\n '-s', self.conn.host,\n '-p', str(self.conn.port),\n '--transfer', 'ln_s',\n str(self.file_path)])\n if cli.rv == 0:\n self.imported = True\n print(f'Imported {self.file_path}')\n return True\n else:\n logging.error(f'Import of {self.file_path} has failed!')\n return False", "def is_mod(self) -> bool:\n if self._mod == 1:\n return True\n if self.channel.name == self.display_name.lower():\n return True\n else:\n return False", "def 
test_importtleCommandExists(self):\n self.assertIn('importtle', get_commands())", "def check_enable_mode(self, check_string='#'):\n return True", "def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def isLoaded(self,modFile):\n return (modFile in self.loadFiles)", "async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:\n conf = config.get(DOMAIN)\n hass.data.setdefault(DOMAIN, {})\n\n if not conf:\n return True\n\n hass.async_create_task(\n hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=conf\n )\n )\n return True", "def is_exporting(self):\n return self.get_json_active_thread_count() > 0", "def is_synchronized(self):\r\n sync_state = True\r\n \r\n for particle in self.population:\r\n sync_state = (sync_state and particle.sync)\r\n \r\n if not sync_state:\r\n break;\r\n \r\n return sync_state", "async def casino_is_global(self):\n return await self.db.Settings.Global()", "def _check_for_sync(self, fl_name):\n fl_sync = True\n # Get the list of flavors names to sync.\n fl_wlist = self.get_flavors_white_list()\n fl_blist = self.get_flavors_black_list()\n\n if (len(fl_wlist) != 0):\n fl_sync = self._regex_comp(fl_name, fl_wlist)\n if (fl_sync and (len(fl_blist) != 0)):\n fl_sync = not(self._regex_comp(fl_name, fl_blist))\n return fl_sync", "def is_local(self):\n if not \"COLLABORATIVE\" in self._file.upper():\n LOGGER.debug(['AIE4606', 'match_false'], {'file': self._file})\n return True\n else:\n LOGGER.debug(['AIE4607', 'match_true'], {'file': self._file})\n return False\n return self._is_local", "def _IsDjangoProject(self):\r\n \r\n manage_file, settings_file = self._FindKeyFiles()\r\n if manage_file is None or settings_file is None:\r\n return False\r\n \r\n return True", "def _global_development_mode() -> bool:\n return (\n not env_util.is_pex()\n and \"site-packages\" not in __file__\n and \"dist-packages\" not in __file__\n and \"__pypackages__\" not in __file__\n )", "def _pre_sync(self):", "async def test_import_invalid(hass):\n mocked_device = _create_mocked_device(True)\n _create_mock_config_entry(hass)\n\n with _patch_config_flow_device(mocked_device):\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_IMPORT}, data=CONF_DATA\n )\n assert result[\"type\"] == RESULT_TYPE_ABORT\n assert result[\"reason\"] == \"connection\"\n\n mocked_device.get_supported_methods.assert_called_once()\n mocked_device.get_interface_information.assert_not_called()", "def is_setup_connected(self):\n return bool(self.get_target_namespace())", "def determine_should_sync(\n self, src_file: Optional[FileStats], dest_file: Optional[FileStats]\n ) -> bool:\n if dest_file:\n dest_file.operation_name = \"delete\"\n LOGGER.debug(\n \"syncing: (None) -> %s (remove), file does not \"\n \"exist at source (%s) and delete mode enabled\",\n dest_file.src if dest_file else None,\n dest_file.dest if dest_file else None,\n )\n return True", "def is_loaded_module(self, module_name):\n\n for (module, ea, name, ord) in self.rt_import_table.values():\n if module == module_name:\n return True\n return False", "def is_managed(self):\n return getattr(self.local, 'managed', False)", "def sync(type, all):\n print(\"Syncing\")", "def is_discord_file(obj):\n return (obj.__class__.__name__) == \"File\"", "def isTx(self):\n\t\treturn self.extension == '.tx'" ]
[ "0.9084145", "0.75585586", "0.69149435", "0.665945", "0.64593935", "0.6442651", "0.63039666", "0.6274873", "0.6236776", "0.6194281", "0.6192882", "0.6032196", "0.59927666", "0.59285295", "0.59285295", "0.59168273", "0.5914921", "0.58185005", "0.57003963", "0.5684882", "0.56790406", "0.56752753", "0.5652213", "0.55742455", "0.5552539", "0.5551295", "0.5519939", "0.5516505", "0.5488701", "0.5475263", "0.5450379", "0.5450379", "0.54391646", "0.53962195", "0.53884214", "0.538781", "0.5386145", "0.5358668", "0.5340932", "0.533837", "0.53283596", "0.5321225", "0.5308533", "0.5307136", "0.52977836", "0.5297199", "0.52932847", "0.5291189", "0.5269965", "0.52684015", "0.5267621", "0.5256897", "0.52550626", "0.5247674", "0.523908", "0.5234402", "0.52315897", "0.52179563", "0.5212075", "0.5211245", "0.52060485", "0.51905113", "0.51826376", "0.5182336", "0.5182152", "0.51801735", "0.5159473", "0.5159473", "0.5151841", "0.5140254", "0.5138527", "0.51362157", "0.5134738", "0.5131407", "0.5131198", "0.51290756", "0.512328", "0.51211494", "0.51085824", "0.5105414", "0.5102252", "0.5102201", "0.50924295", "0.5080459", "0.50679964", "0.50621086", "0.5059412", "0.50577724", "0.50567234", "0.5038386", "0.50351137", "0.50281715", "0.502762", "0.5023896", "0.50225425", "0.5020108", "0.5019279", "0.5018155", "0.50111026", "0.5010759" ]
0.63151574
6
Run command depending on the given client
def run_command(command, client, force_output=False, allow_fail=False, skip_dry_run=False):
    if system.config['verbose']:
        output.message(
            output.host_to_subject(client),
            output.CliFormat.BLACK + command + output.CliFormat.ENDC,
            debug=True
        )

    if system.config['dry_run'] and skip_dry_run:
        return

    if is_remote(client):
        if force_output:
            return ''.join(remote_system.run_ssh_command_by_client(client, command).readlines()).strip()
        else:
            return remote_system.run_ssh_command_by_client(client, command)
    else:
        res = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)

        # Wait for the process end and print error in case of failure
        out, err = res.communicate()

        if res.wait() != 0 and err.decode() != '' and not allow_fail:
            helper.run_script(script='error')
            sys.exit(output.message(output.Subject.ERROR, err.decode(), False))

        if force_output:
            return out.decode().strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _client_cmd(self, cmd):\n logging.info('Client cmd: [%s]', cmd)\n return self._client.run(cmd)", "def run_cmd(server, client):\n msg = [client.get_command()]\n client.input_list += msg\n server.logger.info(\"RECEIVED INPUT {} : {}\".format(client.ip, msg[0]))\n if not client.username or not client.password:\n server.login_screen(client, msg)\n return\n loop_cmds(server, client, msg[0].split(';'))\n server.return_prompt(client)", "def _run(self, client: OpenrCtrl.Client, *args, **kwargs) -> None:\n\n raise NotImplementedError", "def execute_cmd(client, server, msg):\n cmd = msg.strip().split(' ')[0]\n if cmd[0] == \".\":\n server.logger.info(\"BLACKLIST {} : {}\".format(client.ip, cmd))\n client.exit_status = 0\n return\n if cmd in SCRIPTED:\n server.logger.info(\"SCRIPTED CMD {} : {}\".format(client.ip, cmd))\n method = getattr(sys.modules[__name__], \"{}_cmd\".format(cmd))\n result = method(server, client, msg)\n elif cmd not in BLACK_LIST:\n server.logger.info(\"EXECUTING CMD {} : {}\".format(client.ip, cmd))\n response = client.run_in_container(msg)\n if \"exec failed\" not in response:\n if response == \"\\n\":\n return\n server.logger.info(\n \"RESPONSE {}: {}\".format(client.ip, response[:-1]))\n client.send(response)\n print(client.exit_status)\n else:\n not_found(client, server, cmd)", "async def execute(self, client, message, arg):\n\t\treturn", "async def run():\n # Get the arguments from the parser\n args = client.arguments\n\n # If the help argument was used, return\n if hasattr(args, \"help\"):\n return\n # Otherwise, check the correct command and invoke the respective function\n # BUILD\n if args.command == \"build\":\n if args.action == \"delete\":\n await client.delete_build(args.build)\n elif args.action == \"download\":\n await client.download_build(args.build, args.force)\n elif args.action == \"info\":\n await client.show_build(args.build)\n # BUILDS\n elif args.command == \"builds\":\n if args.refresh:\n await client.update_builds()\n await client.show_builds(args.ready_only)\n # FOLDER\n elif args.command == \"folder\":\n if args.action == \"create\":\n await client.create_folder(args.folder, args.no_resources)\n elif args.action == \"info\":\n await client.get_folder(args.folder)\n elif args.action == \"resources\":\n await client.get_resources(args.folder)\n elif args.action == \"delete\":\n await client.delete_folder(args.folder)\n # FOLDERS\n elif args.command == \"folders\":\n if args.refresh:\n await client.post(\"/folders\")\n await client.show_folders()\n # SERVER\n elif args.command == \"server\":\n if args.action == \"start\":\n await client.start_server(args.server, args.build)\n elif args.action == \"info\":\n await client.get_server(args.server)\n elif args.action == \"stop\":\n await client.stop_server(args.server)\n # SERVERS\n elif args.command == \"servers\":\n await client.print_servers()\n # INFO\n else:\n await client.show_info()", "def include_client(client):\n client.set_cmd('salt', 'steward_salt.client.do_salt')\n client.set_cmd('salt.ssh', 'steward_salt.client.do_salt_ssh')\n client.set_cmd('salt.call', 'steward_salt.client.do_salt_call')\n client.set_cmd('omnishell', 'steward_salt.client.do_omnishell')", "def run(self, client, command, logger_name):\r\n logger = logging.getLogger(logger_name)\r\n try:\r\n logger.info(\"Run the command: \" + command)\r\n var = client.run(command)\r\n logger.info(\"Result: PASSED\")\r\n except InternalException, msg:\r\n logger.error(\"RESULT: FAILED; \"+ str(msg))\r\n except RuntimeException, msg:\r\n 
logger.error(\"RESULT: FAILED; \"+ str(msg))\r\n return var", "def on_command(server, user, command, args):", "def initClient(clientPy, debug):\n\n\tcommandString = cmdStr1 + cmdStr2 + clientPy \n\t\n\tif debug == True:\n\t\tcommandString += debugOption\n\n\tcommandString += cmdStr3 + cmdStr4\n\tsenderFile = os.popen(commandString)", "def run_action(client: Client, args: Namespace):\n\n result = None\n\n if args.action == 'exec':\n result = client.run(args.command, *args.argument)\n elif args.action == 'say':\n result = client.say(args.message)\n elif args.action == 'fortune':\n result = client.fortune(\n short=not args.long, offensive=args.offensive)\n elif args.action == 'datetime':\n result = client.datetime(frmt=args.format)\n elif args.action == 'in-use':\n players = client.players\n\n if players.online:\n LOGGER.info('There are %i players online:', players.online)\n LOGGER.info(', '.join(players.names))\n else:\n LOGGER.warning('There are no players online.')\n exit(1)\n\n if result:\n LOGGER.info(result)", "def ExecuteCommandFromClient(command):\n cmd = command.cmd\n args = command.args\n time_limit = command.time_limit\n\n res = client_utils_common.Execute(cmd, args, time_limit)\n (stdout, stderr, status, time_used) = res\n\n # Limit output to 10MB so our response doesn't get too big.\n stdout = stdout[:10 * 1024 * 1024]\n stderr = stderr[:10 * 1024 * 1024]\n\n yield rdf_client_action.ExecuteResponse(\n request=command,\n stdout=stdout,\n stderr=stderr,\n exit_status=status,\n # We have to return microseconds.\n time_used=int(1e6 * time_used))", "def run(ctx, user_cmd):\n connecter = ScalingoInterface(ctx.obj)\n connecter.run(user_cmd)", "def run(ceph_cluster, **kw):\n config = kw[\"config\"]\n\n build = config.get(\"build\", config.get(\"rhbuild\"))\n ceph_cluster.rhcs_version = build\n\n # Manage Ceph using ceph-admin orchestration\n command = config.pop(\"command\")\n log.info(\"Executing client %s\" % command)\n orch = Orch(cluster=ceph_cluster, **config)\n method = MAP_[command]\n method(orch, config)\n return 0", "def select_server(server, clientdir=DEFAULT_CLIENTDIR):\n return subprocess.run(['devpi', 'use', '--clientdir', clientdir, server])", "def manage_client(client):\r\n #information about the player\r\n msg_client('Ora inserisci il tuo nome: ', client)\r\n name = client.recv(BUFSIZ)\r\n clients[client] = name\r\n \r\n init_player(client)\r\n \r\n #get player's role\r\n msg_client('Il tuo ruolo è: ' + str(roles[client]), client)\r\n msg_client('Scrivi {quit} per uscire dal gioco', client)\r\n \r\n insert_number_player(client)\r\n \r\n start_question(client)\r\n \r\n check_player_ready(client)\r\n \r\n start_game(client)\r\n \r\n search_winner()\r\n \r\n close_client(client)", "def do_command(self, args):\n pass", "def message(self, client, text, *args):\n if client is None:\n self.say(text % args)\n elif client.cid is None:\n pass\n else:\n print \"sending msg to %s: %s\" % (client.name, re.sub(re.compile('\\^[0-9]'), '', text % args).strip())", "def commands(server_object, client, address, command_args):\n\n\t#: Import inspect so that we can get the docstring.\n\timport inspect\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the commands, and what they do.\n\tfor command in server_object.client_command_list.keys():\n\n\t\tmsg += \"\\n/\" + command + \" - \"\n\n\t\t#: Get the docstring\n\t\tdocstring = inspect.getdoc(server_object.client_command_list[command][0])\n\n\t\t#: Ignore the portion containing the permission level.\n\t\tdocstring = 
docstring[:docstring.index(\"Permission_level\")]\n\n\t\tmsg += docstring.strip()\n\t\tmsg += \"\\n\"\n\n\tclient.send(msg.encode())", "def client():", "def cat_cmd(server, client, line):\n if len(line.split(' ')) > 1 and line.split(' ')[1] == \"/proc/mounts\":\n path = os.path.dirname(os.path.realpath(__file__))\n path = path[:-7] # shaves off /engine\n with open(\"{}/fakefiles/proc%mounts\".format(path), \"r\") as f:\n response = f.read()\n client.exit_status = 0\n else:\n response = client.run_in_container(line)\n client.send(response)", "def minimal_interactive_cli_bootstrap(client):\n # Fetch available TAN mechanisms by the bank, if we don't know it already. If the client was created with cached data,\n # the function is already set.\n if not client.get_current_tan_mechanism():\n client.fetch_tan_mechanisms()\n mechanisms = list(client.get_tan_mechanisms().items())\n if len(mechanisms) > 1:\n print(\"Multiple tan mechanisms available. Which one do you prefer?\")\n for i, m in enumerate(mechanisms):\n print(i, \"Function {p.security_function}: {p.name}\".format(p=m[1]))\n choice = input(\"Choice: \").strip()\n client.set_tan_mechanism(mechanisms[int(choice)][0])\n\n if client.is_tan_media_required() and not client.selected_tan_medium:\n print(\"We need the name of the TAN medium, let's fetch them from the bank\")\n m = client.get_tan_media()\n if len(m[1]) == 1:\n client.set_tan_medium(m[1][0])\n else:\n print(\"Multiple tan media available. Which one do you prefer?\")\n for i, mm in enumerate(m[1]):\n print(i,\n \"Medium {p.tan_medium_name}: Phone no. {p.mobile_number_masked}, Last used {p.last_use}\".format(\n p=mm))\n choice = input(\"Choice: \").strip()\n client.set_tan_medium(m[1][int(choice)])", "def handle_connection_exec(client):\n class ExitExecLoop(Exception):\n pass\n\n def exit():\n raise ExitExecLoop()\n\n client.settimeout(None)\n fh = client.makefile()\n\n with closing(client):\n with closing(fh):\n try:\n payload = fh.readline()\n while payload:\n _LOG(\"Running: %r.\" % payload)\n eval(compile(payload, '<manhole>', 'exec'), {'exit': exit}, _MANHOLE.locals)\n payload = fh.readline()\n except ExitExecLoop:\n _LOG(\"Exiting exec loop.\")", "def run_command_on_selected_server(command, host_=None):\n print host_\n if not host_:\n select_instance()\n selected_hosts = [\n 'ubuntu@' + env.active_instance.public_dns_name\n ]\n else:\n selected_hosts = [\n 'ubuntu@' + str(env.myhost)\n ]\n execute(command, hosts=selected_hosts)", "def run_client(instance):\n port = [1008, 8989, 9002][instance]\n cpu = ['(3,4)', '(5,6)', '(7,8)'][instance]\n # TODO: the following line is an example of code that is not suitable!\n # should switch to run_udp_app instead of this function\n # ips = [[_server_ips[1], _server_ips[0]],\n ips = [[_server_ips[0],],\n [_server_ips[0]],\n [_server_ips[0]]][instance]\n mpps = 1000 * 1000\n rate = [-2 * mpps, 2 * mpps, 6 * mpps][instance]\n _ips = ' '.join(ips)\n _cnt_flow = [1, count_flow, count_flow][instance]\n delay = [0, 0, 100] # cycles per packet\n args = {\n 'bin': slow_receiver_exp,\n 'cpu': cpu,\n 'count_queue': count_queue,\n 'sysmod': 'bess' if sysmod == 'bess-bp' else sysmod,\n 'mode': 'client',\n 'cnt_ips': len(ips),\n 'ips': _ips,\n 'count_flow': _cnt_flow,\n 'duration': duration,\n 'source_ip': _client_ip[instance],\n 'port': port,\n 'delay': delay[instance],\n 'bidi': 'false',\n }\n if PORT_TYPE == PMD:\n vdev = ['virtio_user1,path=/tmp/ex_vhost1.sock,queues='+str(count_queue),\n 
'virtio_user3,path=/tmp/ex_vhost3.sock,queues='+str(count_queue),][instance]\n prefix = 'slow_receiver_exp_client_{}'.format(instance)\n args['vdev'] = vdev\n args['file-prefix'] = prefix\n cmd = ('sudo {bin} --no-pci --lcores=\"{cpu}\" --file-prefix={file-prefix} '\n '--vdev=\"{vdev}\" --socket-mem=128 -- '\n 'bidi={bidi} {source_ip} {count_queue} {sysmod} {mode} {cnt_ips} {ips} '\n '{count_flow} {duration} {port} {delay}').format(**args)\n else:\n vdev = ['ex_vhost1', 'ex_vhost3', 'ex_vhost4'][instance]\n prefix = 'bessd-dpdk-prefix'\n args['vdev'] = vdev\n args['file-prefix'] = prefix\n cmd = ('sudo {bin} --no-pci --lcores=\"{cpu}\" --file-prefix={file-prefix} '\n '--proc-type=secondary --socket-mem=128 -- '\n 'bidi={bidi} vport={vdev} {source_ip} {count_queue} '\n '{sysmod} {mode} {cnt_ips} {ips} '\n '{count_flow} {duration} {port} {delay}').format(**args)\n if rate >= 0:\n # add rate limit argument\n cmd += ' {}'.format(rate)\n\n print(\"=\" * 32)\n print(\" \" * 13 + \"client\")\n print(cmd)\n print(\"=\" * 32, end='\\n\\n')\n\n # Run in background\n if not DIRECT_OUTPUT:\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(cmd, shell=True)\n return p", "def DispatchCommand(command, options, args, command_map=None):\n if command_map is None:\n command_map = gclient_command_map\n\n if command in command_map:\n return command_map[command](options, args)\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n command)", "def client(clients: int) -> None:\n from DLA.server.client import run_clients\n asyncio.run(run_clients(clients))", "def handle_client(client): # Takes client socket as argument.\n\tr_packet = client.recv(BUFSIZ).decode(\"utf8\")\n\tar_packet = r_packet\n\tr_packet = r_packet.split(\"~\")\n\n\tfor sock in clients:\n\t\tif(clients[sock] == r_packet[0]):\n\t\t\tsock.send(bytes(ar_packet,\"utf8\"))", "def run(self, sock, client):\n\n\n\t\t\"\"\" Set default variables for the current connection \"\"\"\n\t\tself.sock = sock\n\t\tself.client = client\n\t\tself.write_line(self.version)\n\t\tself.running = True;\n\n\t\t\"\"\" Take input from socket, while running \"\"\"\n\t\twhile self.running == True:\n\t\t\ttry:\n\t\t\t\tself.last_line = self.read_line();\n\n\t\t\t\tif self.last_line == False:\n\t\t\t\t\traise Exception(\"Error\")\n\n\t\t\t\tif len(self.last_line) != 0:\n\t\t\t\t\taction = shlex.split(self.last_line.lower())\n\t\t\t\t\tthread = threading.Thread(target = self.call_method, args = [action])\n\t\t\t\t\tthread.start()\n\t\t\texcept Exception as e:\n\t\t\t\tbreak;\n\t\tself.close();", "def test_output_one_client(self):\n self.test_case = 'one_client'\n self._run_test_case()", "def runClient(clients):\n child = subprocess.Popen(CLIENT + (\"localhost\", str(PORT), str(clients)), stdout=subprocess.PIPE)\n output = \"\"\n for line in child.stdout:\n output += line\n error = child.wait()\n assert error == 0\n\n parts = output.split()\n assert parts[-1] == \"reqs/s\"\n return float(parts[-2])", "def run_client(self, event_loop, irc_client):\n # Deliberately written in \"synchronous\" style with run_until_complete()\n # instead of await because async generators don't work in Python 3.5.\n with self.mock_open_connection():\n # Start the client\n run_fut = event_loop.create_task(irc_client.run())\n event_loop.run_until_complete(irc_client.connected.wait())\n # Allow the test to run\n yield\n # Cleanly end the read loop and wait for client to exit\n irc_client.disconnect()\n 
event_loop.run_until_complete(run_fut)", "def start_client(self):\n if self.client is not None:\n return\n\n # Arguments for the client\n browser = self.vim.vars.get('markdown_composer_browser')\n open_browser = (\n self.vim.vars.get('markdown_composer_open_browser', 1) == 1)\n syntax_theme = self.vim.vars.get('markdown_composer_syntax_theme')\n current_buffer = '\\n'.join(self.vim.current.buffer)\n\n plugin_root = Path(__file__).parents[3]\n args = ['cargo', 'run', '--release', '--']\n if browser:\n args.append('--browser=%s' % browser)\n\n if not open_browser:\n args.append('--no-browser')\n\n if syntax_theme:\n args.append('--highlight-theme=%s' % syntax_theme)\n\n args.append('--working-directory=%s' % os.getcwd())\n\n if os.path.isfile(self.vim.current.buffer.name):\n args.append(self.vim.current.buffer.name)\n\n self.client = subprocess.Popen(args,\n bufsize=0,\n cwd=str(plugin_root),\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE)", "def test_echo_one_node(self):\n testmsg = self.shortDescription()\n clients = self.get_client_list()\n if clients:\n self.skipTest('Client list is not empty.')\n\n # Launch both the client and target instances on the\n # same node.\n procrtn = self.launch_test(testmsg, '1', self.pass_env, \\\n cli_arg='tests/crt_echo_cli', \\\n srv_arg='tests/crt_echo_srv')\n\n if procrtn:\n self.fail(\"Failed, return code %d\" % procrtn)", "def test_build_command_daemon(self):\n actual_result = IperfClientCommandBuilder()\\\n .set_server_ip(SERVER_IP)\\\n .build_client_command()\n self.assertListEqual(actual_result, ['iperf', '-c', '192.168.1.1'])", "def parse_commands(message, client=None):\n if(message[0] != COMMAND_START_SYMBOL):\n return False\n\n components = message[1:].split(\" \")\n command = components[0]\n\n if(command in commands.keys()):\n permission_level = SERVER_PERMISSION_ALL\n if(client is not None):\n permission_level = client.client_info.permissions\n\n if(check_permission(permission_level, commands[command][\"perm_required\"])):\n commands[command][\"on_run\"](args=components, client=client)\n else:\n to_client_or_console(\"Access to \" + command + \" denied.\", client)\n # We did attempt to use a command, so don't say this in chat...\n # return False\n else:\n to_client_or_console(\"Command \" + command + \" not found.\", client)\n # We did attempt to use a command, so don't say this in chat...\n # return False\n\n return True", "def grr_set_client(line: Text) -> None:\n args = grr_set_client.parser.parse_args(shlex.split(line))\n magics_impl.grr_set_client_impl(args.hostname, args.client)", "def run(self, *args, **kwargs) -> None:\n\n with get_openr_ctrl_client(self.host, self.cli_opts) as client:\n self._run(client, *args, **kwargs)", "def test_client_no_host(self):\n (code, out, err) = run([\"./client\", \"-p\",\"12345\", \"hello.txt\"]);\n self.assertEqual(code, 1)", "def add_client(self, client):\n\n now = int(time.time())\n\n self.send_line(\"%s N %s 1 %d %s %s +ik ]]]]]] %s :%s\" %\\\n (self.config[\"numeric\"], client.nick, now, client.user,\n client.host, client.uid, client.gecos))", "def run_chat_client():\r\n while must_run:\r\n print_menu()\r\n action = select_user_action()\r\n perform_user_action(action)\r\n print(\"Thanks for watching. Like and subscribe! 
👍\")", "def command():\n pass", "def run_server(ctx, ram, yourkit, dry_run, minecraft_version, yourkit_delay, yourkit_modes):\n ctx.jvm = ctx.parent.jvm # This should auto-inherit -_-\n try:\n ctx.minecraft_version = MinecraftVersion(minecraft_version)\n except ValueError:\n raise ClickException(f\"Invalid minecraft version: {minecraft_version!r}\")\n if ctx.invoked_subcommand is None:\n print()\n print(\"No command specified!\")\n click.echo(ctx.get_help())", "def start_rgw(ctx, config, on_client = None, except_client = None):\n log.info('Starting rgw...')\n log.debug('client %r', on_client)\n clients_to_run = [on_client]\n if on_client is None:\n clients_to_run = list(config.keys())\n testdir = teuthology.get_testdir(ctx)\n for client in clients_to_run:\n if client == except_client:\n continue\n (remote,) = iter(ctx.cluster.only(client).remotes.keys())\n cluster_name, daemon_type, client_id = teuthology.split_role(client)\n client_with_id = daemon_type + '.' + client_id\n client_with_cluster = cluster_name + '.' + client_with_id\n zone = rgw_utils.zone_for_client(ctx, client)\n log.debug('zone %s', zone)\n client_config = config.get(client)\n if client_config is None:\n client_config = {}\n log.info(\"rgw %s config is %s\", client, client_config)\n id_ = client.split('.', 1)[1]\n log.info('client {client} is id {id}'.format(client=client, id=id_))\n cmd_prefix = [\n 'sudo',\n 'adjust-ulimits',\n 'ceph-coverage',\n '{tdir}/archive/coverage'.format(tdir=testdir),\n 'daemon-helper',\n 'term',\n ]\n\n rgw_cmd = ['radosgw']\n\n host, port = ctx.multisite.role_endpoints[client]\n rgw_cmd.extend([\n '--rgw-frontends',\n 'civetweb port={port}'.format(port=port),\n ])\n\n if zone is not None:\n rgw_cmd.extend(['--rgw-zone', zone])\n\n rgw_cmd.extend([\n '-n', client_with_id,\n '--cluster', cluster_name,\n '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),\n '--log-file',\n '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),\n '--rgw_ops_log_socket_path',\n '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,\n client_with_cluster=client_with_cluster),\n '--foreground',\n run.Raw('|'),\n 'sudo',\n 'tee',\n '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(tdir=testdir,\n client_with_cluster=client_with_cluster),\n run.Raw('2>&1'),\n ])\n\n run_cmd = list(cmd_prefix)\n run_cmd.extend(rgw_cmd)\n\n ctx.daemons.add_daemon(\n remote, 'rgw', client,\n args=run_cmd,\n logger=log.getChild(client),\n stdin=run.PIPE,\n wait=False,\n )\n\n # XXX: add_daemon() doesn't let us wait until radosgw finishes startup\n # use a connection pool with retry/backoff to poll each gateway until it starts listening\n http = PoolManager(retries=Retry(connect=8, backoff_factor=1))\n for client in clients_to_run:\n if client == except_client:\n continue\n host, port = ctx.multisite.role_endpoints[client]\n endpoint = 'http://{host}:{port}/'.format(host=host, port=port)\n log.info('Polling {client} until it starts accepting connections on {endpoint}'.format(client=client, endpoint=endpoint))\n http.request('GET', endpoint)\n\n try:\n yield\n finally:\n teuthology.stop_daemons_of_type(ctx, 'rgw')\n for client in config.keys():\n ctx.cluster.only(client).run(\n args=[\n 'rm',\n '-f',\n '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,\n client=client),\n ],\n )", "def _handle_client(self, client_reader, client_writer):\n while True:\n data = (yield from client_reader.readline()).decode(\"utf-8\")\n if not data: # an 
empty string means the client disconnected\n break\n cmd, *args = data.rstrip().split(' ')\n if cmd == 'add':\n arg1 = float(args[0])\n arg2 = float(args[1])\n retval = arg1 + arg2\n client_writer.write(\"{!r}\\n\".format(retval).encode(\"utf-8\"))\n elif cmd == 'repeat':\n times = int(args[0])\n msg = args[1]\n client_writer.write(\"begin\\n\".encode(\"utf-8\"))\n for idx in range(times):\n client_writer.write(\"{}. {}\\n\".format(idx+1, msg)\n .encode(\"utf-8\"))\n client_writer.write(\"end\\n\".encode(\"utf-8\"))\n else:\n print(\"Bad command {!r}\".format(data), file=sys.stderr)\n\n # This enables us to have flow control in our connection.\n yield from client_writer.drain()", "def do(self, line): \n self.interface.onecmd(line)", "def msg_client(msg, client):\r\n client.send(bytes(str(msg), \"utf-8\"))", "def test_execute_pywbemcli(self, desc, inputs, exp_response, condition,\n server_url):\n # pylint: disable=redefined-outer-name\n cmd_grp = inputs['cmdgrp'] if 'cmdgrp' in inputs else ''\n\n # server url acquired from the server_url fixture\n inputs['general'].extend(['-s', server_url, '--no-verify', ])\n\n self.command_test(desc, cmd_grp, inputs, exp_response, None,\n condition)", "def test_build_full_command(self):\n actual_result = IperfClientCommandBuilder() \\\n .set_server_ip(SERVER_IP)\\\n .set_port('22')\\\n .set_mode_udp(IPERF_MODE)\\\n .set_time_interval(INTERVAL)\\\n .set_testing_time(TIME) \\\n .build_client_command()\n self.assertListEqual(actual_result,\n ['iperf', '-c', '192.168.1.1', '-u',\n '-p', '22', '-t', '30', '-i', '5'])", "def handle_command(ARGS, CLIENT, command, channel):\n message = '''Commands I know:\n list teams\n scores <optional week number>\n does Brandon suck\n '''\n message = \"\"\n attachments = \"\"\n if command == \"list teams\":\n message = '\\n'.join(map(lambda x: x.team_name, ARGS.league.teams))\n elif command == \"does brandon suck\":\n message = 'yes'\n elif 'scores' in command:\n pieces = command.split(' ')\n if len(pieces) == 1:\n message = 'Current Scoreboard'\n matchups = ARGS.league.scoreboard(projections=True)\n else:\n message = 'Scoreboard for week ' + pieces[1]\n matchups = ARGS.league.scoreboard(pieces[1], projections=True)\n\n attachments = [{\n 'fallback': 'A textual representation of your table data',\n 'fields': [\n {\n 'title': 'Home',\n 'value': '\\n'.join(map(lambda x: x.home_team.team_abbrev + \" \" + str(x.home_score) + \" (\" + str(x.home_projection) + \")\", matchups)),\n 'short': True\n },\n {\n 'title': 'Away',\n 'value': '\\n'.join(map(lambda x: x.away_team.team_abbrev + \" \" + str(x.away_score) + \" (\" + str(x.away_projection) + \")\", matchups)),\n 'short': True\n }\n ]\n }]\n CLIENT.api_call(\"chat.postMessage\", channel=channel, text=message, attachments=attachments, as_user=True)\n\n # CLIENT.api_call(\"chat.postMessage\", channel=channel, text=message, as_user=True)", "async def test_cli(\n aiohttp_client,\n otupdate_config,\n monkeypatch,\n version_file_path,\n mock_name_synchronizer,\n):\n app = await buildroot.get_app(\n name_synchronizer=mock_name_synchronizer,\n system_version_file=version_file_path,\n config_file_override=otupdate_config,\n boot_id_override=\"dummy-boot-id-abc123\",\n )\n client = await aiohttp_client(app)\n return client", "def check(client: Client):\n pass", "def test_for_client():", "def client(self,message):\n self.message = message\n self.run()", "def _cli():\n pass", "def _recv_from_client(self):\n command = self._client.recv_from_client()\n\n if isinstance(command, 
MLClientExecutionError):\n raise command\n\n return command", "def handle_client(self, client):\n request_data = client.recv(1024)\n request_lines = request_data.splitlines()\n for line in request_lines:\n print(line)\n request_start_line = request_lines[0]\n print(request_start_line.decode(\"utf-8\"))\n\n file_name = re.match(r\"\\w+ +(/[^ ]*) \", request_start_line.decode(\"utf-8\")).group(1)\n method = re.match(r\"(\\w+) +/[^ ]* \", request_start_line.decode(\"utf-8\")).group(1)\n\n env = {\n \"PATH_INFO\" : file_name,\n \"METHOD\": method\n }\n response_body = self.app(env, self.start_response)\n response = self.response_headers + \"\\r\\n\" + response_body\n print(\"response data: \", response)\n client.send(bytes(response, \"utf-8\"))\n client.close()", "def _command(self, *cmd, handler=None):", "def main() -> None:\n parser = argparse.ArgumentParser(description=\"Flower\")\n parser.add_argument(\n \"--server_address\",\n type=str,\n default=DEFAULT_SERVER_ADDRESS,\n help=f\"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})\",\n )\n parser.add_argument(\n \"--cid\", type=str, required=True, help=\"Client CID (no default)\"\n )\n parser.add_argument(\n \"--log_host\", type=str, help=\"Logserver address (no default)\",\n )\n parser.add_argument(\n \"--nb_clients\", type=int, default=10, help=\"Total number of clients\",\n )\n args = parser.parse_args()\n\n # Configure logger\n fl.common.logger.configure(f\"client_{args.cid}\", host=args.log_host)\n\n # Load model and data\n #model = cifar.load_model()\n #model.to(DEVICE)\n #trainset, testset = cifar.load_data()\n\n # Start client\n #client = CifarClient(args.cid, model, trainset, testset, args.nb_clients)\n #client = CifarClient(args.cid, trainset, testset, args.nb_clients)\n client = CifarClient(args.cid,args.nb_clients)\n fl.client.start_client(args.server_address, client)", "def send_command(client, device_label, device_command, device_hold_secs=0):\n device_id = devices[device_label]['id']\n func = client.send_command(device_id, device_command, device_hold_secs)\n run_in_loop_now('send_command', func)\n print(\"Sent: \" + device_command + \" to \" + device_label)\n return", "def __init__(self, client, resolve_options = False, delete_unused = True, wait_sync = 1) -> None:\n self.resolve_options: bool = resolve_options\n self.delete_unused: bool = delete_unused\n self.wait_sync: float = wait_sync\n\n self._discord: com.Bot = client\n self.commands: Dict[(str, SlashCommand)] = {}\n self.subcommands: Dict[(str, Dict[(str, SubSlashCommand)])] = {}\n self.subcommand_groups: Dict[(str, Dict[(str, SubSlashCommandGroup)])] = {}\n self._discord.add_listener(self._socket_response, 'on_socket_response')\n\n self.ready = False\n async def client_ready():\n await asyncio.sleep(self.wait_sync or 1)\n self._discord.loop.create_task(self.add_commands())\n self.ready = True\n self._discord.add_listener(client_ready, \"on_ready\")", "def client(self, msg, *args, **kwargs):\r\n return log(self.CLIENT, msg, *args, **kwargs)", "def clients():\n pass", "def execute(self, irc_c, msg, cmd):", "def run_command(\n self,\n *,\n master_only: bool,\n user: str,\n identity_file: str,\n command: tuple):\n if master_only:\n target_hosts = [self.master_ip]\n else:\n target_hosts = [self.master_ip] + self.slave_ips\n\n partial_func = functools.partial(\n run_command_node,\n user=user,\n identity_file=identity_file,\n command=command)\n hosts = target_hosts\n\n run_against_hosts(partial_func=partial_func, hosts=hosts)", "def cli():", "def cli():", "def cli():", 
"def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def _run_cmd_on_custom_machine(self, cmd, fab_conf, sudo=False, retries=3):\n with fab_env(**fab_conf):\n self._execute_command(cmd, sudo=sudo, retries=retries)", "def handle_commands_preset(self,cl,addr) :\n self.curDir = ['CTF','Challenges','tempUser'+str(random.randint(100,999))]\n try :\n client = cl\n if self.curDir != [] : \n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~/{}$ \".format('/'.join(self.curDir))\n else :\n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~$ \"\n self.userp = userp.encode()\n client.send(\"\"\"\nCustom Shell Server With Limited Functionality\n\nNew User Login from {} at {}\n \\n\"\"\".format(addr[0],time.ctime()).encode())\n shellin = \"\" \n while True:\n if self.curDir != [] : \n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~/{}$ \".format('/'.join(self.curDir))\n else :\n userp = \"temp-user-\"+addr[0].replace('.','-')+\"@ieeectf:~$ \"\n self.userp = userp.encode()\n client.send(self.userp)\n shellin = client.recv(2048).decode().strip('\\n')\n if shellin == \"exit\" or shellin == \"exit \" or shellin ==\"exit \" or shellin ==\"exit \" :\n break\n elif shellin == \"\" :\n continue\n elif shellin.split()[0] in self.denied :\n client.send(self.err.format(shellin.split()[0]).encode())\n else :\n self.handle_extended_commands(client,addr,shellin)\n continue\n client.close()\n except Exception as E:\n print(E)\n print(Log(\"Connection with {} Terminated\".format(addr)))", "def run_command(self, command, joy_state):\n cmd = self.command_list[command]\n if cmd['type'] == 'topic':\n self.run_topic(command, joy_state)\n elif cmd['type'] == 'action':\n if cmd['action_name'] in self.offline_actions:\n self.get_logger().error('command {} was not played because the action '\n 'server was unavailable. Trying to reconnect...'\n .format(cmd['action_name']))\n self.register_action(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_action(command, joy_state)\n elif cmd['type'] == 'service':\n if cmd['service_name'] in self.offline_services:\n self.get_logger().error('command {} was not played because the service '\n 'server was unavailable. Trying to reconnect...'\n .format(cmd['service_name']))\n self.register_service(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_service(command, joy_state)\n else:\n raise JoyTeleopException(\n 'command {} is neither a topic publisher nor an action or service client'\n .format(command))", "def execute_frontend(self, cmd, verbose=True):\n return self.arangosh.run_command(cmd, verbose)", "def handle_client(client): # Takes client socket as argument.\r\n name = client.recv(BUFSIZ).decode(\"utf8\")\r\n welcome = 'Welcome %s! If you ever want to quit, type {quit} to exit.' 
% name\r\n client.send(bytes(welcome, \"utf8\"))\r\n msg = \"%s has joined the chat!\" % name\r\n broadcast(bytes(msg, \"utf8\"))\r\n clients[client] = name\r\n while True:\r\n msg = client.recv(BUFSIZ)\r\n if msg != bytes(\"{quit}\", \"utf8\"):\r\n broadcast(msg, name+\": \")\r\n else:\r\n client.send(bytes(\"{quit}\", \"utf8\"))\r\n client.close()\r\n del clients[client]\r\n broadcast(bytes(\"%s has left the chat.\" % name, \"utf8\"))\r\n break", "def process_command(self, text, server):\n words = text.split(' ')\n command_name = words[0][1:].lower()\n\n if command_name == \"talk\":\n try:\n value = float(words[1])\n old = self.config[\"servers\"][server][\"responsiveness\"]\n self.config[\"servers\"][server][\"responsiveness\"] = value\n self.save_config()\n\n if value > old:\n return \"Okay dokey!\"\n elif value < old:\n return \"Shutting up now!\"\n else:\n return \"I already am...\"\n except:\n return \"Hey man, try using a value between 0 and 1.\"\n elif command_name == \"status\":\n try:\n return \"Ignored: {0}\\nResponsiveness: {1}\".format(\n self.config[\"servers\"][server][\"ignore\"],\n self.config[\"servers\"][server][\"responsiveness\"])\n except:\n return \"Error retrieving configuration\"\n else:\n # Search for command in commands module, call with all params provided\n try:\n command = getattr(commands, command_name)\n return command(*words[1:])\n except:\n return \"Invalid command, dummy.\"", "def verify_client_run(self, exp_iface, env):\n hfi_map = {\"ib0\": \"hfi1_0\", \"ib1\": \"hfi1_1\"}\n\n # Get counter values for hfi devices before and after\n cnt_before = self.get_port_cnt(\n self.hostlist_clients, hfi_map[exp_iface], \"port_rcv_data\")\n\n # get the dmg config file for daos_racer\n dmg = self.get_dmg_command()\n\n # Let's run daos_racer as a client\n daos_racer = DaosRacerCommand(self.bin,\n self.hostlist_clients[0], dmg)\n daos_racer.get_params(self)\n\n # Update env_name list to add OFI_INTERFACE if needed.\n if env:\n daos_racer.update_env_names([\"OFI_INTERFACE\"])\n\n # Setup the environment and logfile\n logf = \"daos_racer_{}_{}.log\".format(exp_iface, env)\n\n # Add FI_LOG_LEVEL to get more info on device issues\n racer_env = daos_racer.get_environment(self.server_managers[0], logf)\n racer_env[\"FI_LOG_LEVEL\"] = \"info\"\n racer_env[\"D_LOG_MASK\"] = \"INFO,object=ERR,placement=ERR\"\n daos_racer.set_environment(racer_env)\n\n # Run client\n daos_racer.run()\n\n # Verify output and port count to check what iface CaRT init with.\n cnt_after = self.get_port_cnt(\n self.hostlist_clients, hfi_map[exp_iface], \"port_rcv_data\")\n\n diff = 0\n for cnt_b, cnt_a in zip(cnt_before.values(), cnt_after.values()):\n diff = int(cnt_a) - int(cnt_b)\n self.log.info(\"Port [%s] count difference: %s\", exp_iface, diff)\n\n # Read daos.log to verify device used and prevent false positives\n self.assertTrue(\n self.get_log_info(\n self.hostlist_clients, exp_iface, env, get_log_file(logf)))\n\n # If we don't see data going through the device, fail\n status = True\n if diff <= 0:\n self.log.info(\"No traffic seen through device: %s\", exp_iface)\n status = False\n else:\n status = True\n return status" ]
[ "0.7197345", "0.7119318", "0.6828006", "0.66738003", "0.64791876", "0.64679384", "0.64308935", "0.6380497", "0.62591547", "0.6222036", "0.60982287", "0.60168076", "0.6011429", "0.58656096", "0.5851736", "0.5816941", "0.58131343", "0.57894695", "0.57740134", "0.5730364", "0.56983256", "0.5691164", "0.5642462", "0.5627062", "0.5617154", "0.5613435", "0.5609336", "0.5594074", "0.559099", "0.5589143", "0.55877984", "0.5585171", "0.5577216", "0.55531055", "0.5529001", "0.55264586", "0.55240077", "0.5522935", "0.5512779", "0.5510762", "0.5510516", "0.5480312", "0.5465885", "0.5465314", "0.5450643", "0.5442869", "0.54401135", "0.54200375", "0.5413768", "0.54125327", "0.54093075", "0.5404762", "0.5403059", "0.5383361", "0.53759456", "0.5373336", "0.53480536", "0.5343828", "0.5341509", "0.53394586", "0.53388405", "0.5319543", "0.53006744", "0.5295486", "0.52928007", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.5287446", "0.52828926", "0.5279973", "0.5271736", "0.5266946", "0.5263006", "0.52624965", "0.5251624" ]
document_score: 0.5610078
document_rank: 26
Assert valid court order.
def test_court_orders(session, test_status, expected_code, expected_msg): business = factory_business('BC1234567') filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE) del filing['filing']['courtOrder']['fileKey'] if test_status == 'FAIL': del filing['filing']['courtOrder']['orderDetails'] filing['filing']['courtOrder']['effectOfOrder'] = 'invalid' err = validate(business, filing) if expected_code: assert err.code == expected_code assert lists_are_equal(err.msg, expected_msg) else: assert err is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_case_customer_complete_courseevent_order(self):", "def test_order_constraint(self):\n orders_placed = [25, 25, 25]\n with self.assertRaises(Exception):\n analyse_uncertain_demand.UncertainDemand(\n orders=orders_placed,\n sku='Rx493-90',\n lead_time=Decimal(4),\n unit_cost=Decimal(40),\n reorder_cost=Decimal(400),\n retail_price=Decimal(600),\n currency='USD'\n )", "def test_validate_good_order(self):\n for proj in testorders.good_test_projections:\n valid_order = copy.deepcopy(self.base_order)\n valid_order['projection'] = {proj: testorders.good_test_projections[proj]}\n\n try:\n good = api.validation(valid_order, self.staffuser.username)\n except ValidationException as e:\n self.fail('Raised ValidationException: {}'.format(e.message))", "def test_validate_valid_crisis(self):\r\n assert self.crisis_tree != 0", "def verify_courses(self, courses):\n assert len(courses) == 1\n self.verify_course(courses[0])", "def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def test_validate_bad_orders(self):\n exc_type = ValidationException\n invalid_order = copy.deepcopy(self.base_order)\n c = 0 # For initial debugging\n\n for proj in testorders.good_test_projections:\n invalid_order['projection'] = {proj: testorders.good_test_projections[proj]}\n\n invalid_list = testorders.InvalidOrders(invalid_order, self.base_schema, abbreviated=True)\n\n for order, test, exc in invalid_list:\n # issues getting assertRaisesRegExp to work correctly\n with self.assertRaises(exc_type):\n try:\n c += 1\n api.validation(order, self.staffuser.username)\n except exc_type as e:\n if str(exc) in str(e):\n raise\n else:\n self.fail('\\n\\nExpected in exception message:\\n{}'\n '\\n\\nException message raised:\\n{}'\n '\\n\\nUsing test {}'.format(str(exc), str(e), test))\n else:\n self.fail('\\n{} Exception was not raised\\n'\n '\\nExpected exception message:\\n{}\\n'\n '\\nUsing test: {}'.format(exc_type, str(exc), test))\n #print c # For initial debugging", "def test_02_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.test_01_lunch_order()\r\n #We have a confirmed order with its associate cashmove\r\n #We execute the cancel function\r\n self.order_one.cancel()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #We check that the state is cancelled and that the cashmove has been deleted\r\n self.assertEqual(self.order_one.state,'cancelled')\r\n self.assertFalse(self.order_one.cashmove)", "def test_required_properties_order() -> None:\n soup = generate_case(\"required_properties_order\")\n\n tests.html_schema_doc_asserts.assert_undocumented_required(soup, [\"a\", \"b\", \"b\", \"a\"])", "def test_headlines_order(self) -> None:\n last: Tuple[int, str] = (0, \"\")\n\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if (not rule) or (rule.order is None):\n continue\n\n last_order, last_headline = last # type: int, str\n if last_order > 
rule.order:\n self.add_error(\n (\n f\"Rubriken {headline.name} ska komma före \"\n f\"rubriken {last_headline}.\"\n ),\n headline=headline,\n )\n\n last = (rule.order, headline.name)", "def test_create_course(self):\r\n self.assert_created_course()", "def test_entities__EntityOrder__1():\n zope.interface.verify.verifyObject(IEntityOrder, EntityOrder())", "def final_check(self, test_collection):\n assert True", "def testSortOrder(self):\n timestamp = time.time()\n comment_id1 = Comment.ConstructCommentId(timestamp, 0, 0)\n comment_id2 = Comment.ConstructCommentId(timestamp + 1, 0, 0)\n self.assertGreater(comment_id2, comment_id1)", "def _check_course(self, source_course_loc, dest_course_loc, expected_blocks, unexpected_blocks):\r\n history_info = modulestore().get_course_history_info(dest_course_loc)\r\n self.assertEqual(history_info['edited_by'], self.user)\r\n for expected in expected_blocks:\r\n # since block_type has no impact on identity, we can just provide an empty string\r\n source = modulestore().get_item(source_course_loc.make_usage_key(\"\", expected))\r\n pub_copy = modulestore().get_item(dest_course_loc.make_usage_key(\"\", expected))\r\n # everything except previous_version & children should be the same\r\n self.assertEqual(source.category, pub_copy.category)\r\n self.assertEqual(source.update_version, pub_copy.update_version)\r\n self.assertEqual(\r\n self.user, pub_copy.edited_by,\r\n \"{} edited_by {} not {}\".format(pub_copy.location, pub_copy.edited_by, self.user)\r\n )\r\n for field in source.fields.values():\r\n if field.name == 'children':\r\n self._compare_children(field.read_from(source), field.read_from(pub_copy), unexpected_blocks)\r\n else:\r\n self.assertEqual(field.read_from(source), field.read_from(pub_copy))\r\n for unexp in unexpected_blocks:\r\n with self.assertRaises(ItemNotFoundError):\r\n modulestore().get_item(dest_course_loc.make_usage_key(\"\", unexp))", "def assertKeys(self, data, expected):\r\n self.assertEqual(sorted(data.keys()), sorted(expected))", "def assert_response_orders(self, *args, **kwargs):\n self.assert_response_order(*args, **kwargs)\n kwargs['order_by'] = '-' + kwargs['order_by']\n self.assert_response_order(*args, **kwargs)", "def verify_course(self, course, course_id='edX/toy/2012_Fall'):\n assert course_id == str(course.id)", "def test_lpdaac_good(self):\n self.assertIsNone(api.inventory.check(self.lpdaac_order_good))", "def test_01_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.test_00_lunch_order()\r\n #We receive the order so we confirm the order line so it's state will be 'confirmed'\r\n #A cashmove will be created and we will test that the cashmove amount equals the order line price\r\n self.order_one.confirm()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #we check that our order_line is a 'confirmed' one and that there are a cashmove linked to that order_line with an amount equals to the order line price:\r\n self.assertEqual(self.order_one.state,'confirmed')\r\n self.assertTrue(self.order_one.cashmove)\r\n self.assertTrue(self.order_one.cashmove[0].amount==-self.order_one.price)", "def test_c(self):\n v1 = versions.Version(version='1.2', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertFalse(v1 >= v2)\n self.assertTrue(v2 >= v1)", "def course_tester(courses):\n\n return False", "def _verify_published_course(courses_published):\r\n self.assertEqual(len(courses_published), 1, len(courses_published))\r\n course = 
self.findByIdInResult(courses_published, \"head23456\")\r\n self.assertIsNotNone(course, \"published courses\")\r\n self.assertEqual(course.location.course_key.org, \"testx\")\r\n self.assertEqual(course.location.course_key.offering, \"wonderful\")\r\n self.assertEqual(course.category, 'course', 'wrong category')\r\n self.assertEqual(len(course.tabs), 4, \"wrong number of tabs\")\r\n self.assertEqual(course.display_name, \"The most wonderful course\",\r\n course.display_name)\r\n self.assertIsNone(course.advertised_start)\r\n self.assertEqual(len(course.children), 0,\r\n \"children\")", "def test_payment_accepted_order(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.first_name,\r\n 'billTo_lastName': u\"\\u2603\",\r\n 'orderNumber': str(order1.id),\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n 'ccAuthReply_amount': '0.00'\r\n }\r\n\r\n # tests for an order number that doesn't match up\r\n params_bad_ordernum = params.copy()\r\n params_bad_ordernum['orderNumber'] = str(order1.id + 10)\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params_bad_ordernum)\r\n\r\n # tests for a reply amount of the wrong type\r\n params_wrong_type_amt = params.copy()\r\n params_wrong_type_amt['ccAuthReply_amount'] = 'ab'\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params_wrong_type_amt)\r\n\r\n # tests for a reply amount of the wrong type\r\n params_wrong_amt = params.copy()\r\n params_wrong_amt['ccAuthReply_amount'] = '1.00'\r\n with self.assertRaises(CCProcessorWrongAmountException):\r\n payment_accepted(params_wrong_amt)\r\n\r\n # tests for a not accepted order\r\n params_not_accepted = params.copy()\r\n params_not_accepted['decision'] = \"REJECT\"\r\n self.assertFalse(payment_accepted(params_not_accepted)['accepted'])\r\n\r\n # finally, tests an accepted order\r\n self.assertTrue(payment_accepted(params)['accepted'])", "def test_00_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #we check that our order_line is a 'new' one and that there are no cashmove linked to that order_line:\r\n self.assertEqual(self.order_one.state,'new')\r\n self.assertEqual(list(self.order_one.cashmove), [])\r\n #we order that orderline so it's state will be 'ordered'\r\n self.order_one.order()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #we check that our order_line is a 'ordered' one and that there are no cashmove linked to that order_line:\r\n self.assertEqual(self.order_one.state,'ordered')\r\n self.assertEqual(list(self.order_one.cashmove), [])", "def testCourses(self):\n self.person.invokeFactory(type_name=\"FSDCourse\", id=\"test-course\")\n self.failUnless('test-course' in self.person.contentIds())\n self.failUnless('test-course' in [c.id for c in self.person.getCourses()])", "def test_certificate_validations():\n course_runs = CourseRunFactory.create_batch(2)\n programs = ProgramFactory.create_batch(2)\n\n course_runs[0].course.page.certificate_page.save_revision()\n course_runs[1].course.page.certificate_page.save_revision()\n\n programs[0].page.certificate_page.save_revision()\n programs[1].page.certificate_page.save_revision()\n\n course_certificate = CourseRunCertificateFactory(\n course_run=course_runs[0],\n 
certificate_page_revision=course_runs[\n 1\n ].course.page.certificate_page.get_latest_revision(),\n )\n program_certificate = ProgramCertificateFactory(\n program=programs[0],\n certificate_page_revision=programs[\n 1\n ].page.certificate_page.get_latest_revision(),\n )\n\n # When the revision doesn't match the courseware\n with pytest.raises(\n ValidationError,\n match=f\"The selected certificate page {course_certificate} is not for this course {course_runs[0].course}.\",\n ):\n course_certificate.clean()\n\n with pytest.raises(\n ValidationError,\n match=f\"The selected certificate page {program_certificate} is not for this program {programs[0]}.\",\n ):\n program_certificate.clean()", "def validate_testdata(self):\r\n self._get_tcorder()\r\n for line in self.data:\r\n if not line.startswith(\" \"):\r\n tcname = line.strip(\"\\n\")\r\n continue\r\n if \"[Setup]\" in line:\r\n if \"depends\" in line:\r\n line = line.strip(\"\\n\").split(\"depends\")[1][1:]\r\n depends = line.split()[0].split(',')\r\n self._check_dependency(tcname, depends)\r\n\r\n if self.dependency:\r\n msg = \"Test cases are not in proper dependency order.\\n\"\r\n for i in self.dependency:\r\n msg = msg + i\r\n logger.warn(msg, console=False)\r\n notify.message(msg)\r\n raise DependencyException(msg)\r\n else:\r\n msg = \"Testcases are in correct dependency order.\"\r\n logger.warn(msg)\r\n notify.message(msg)", "def test_init(self):\n test_order = Order(\"1\", \"Large\", \"Thin\", \"Cheese\")\n self.assertEqual(test_order.quantity, \"1\")\n self.assertEqual(test_order.size, \"Large\")\n self.assertEqual(test_order.crust, \"Thin\")\n self.assertEqual(test_order.toppings, \"Cheese\")", "def testProtractedNSESanityChecks(self):\n self.assertGreater(self.c3.get_species_richness(1), self.c2.get_species_richness(1))\n self.assertLess(self.c4.get_species_richness(1), self.c3.get_species_richness(1))", "def check_prerequisites(self):\n self.courses_not_completed = self.prerequisite_set - set(self.user_courses.keys())", "def test_c(self):\n v1 = versions.Version(version='1.2', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertTrue(v1 <= v2)\n self.assertFalse(v2 <= v1)", "def test_entities__EntityOrder____iter____1(entityOrder):\n assert ([\n 'IcemacAddressbookAddressbookAddressbook',\n 'IcemacAddressbookPersonPerson',\n 'IcemacAddressbookPersonPersondefaults',\n 'IcemacAddressbookAddressPostaladdress',\n 'IcemacAddressbookAddressPhonenumber',\n 'IcemacAddressbookAddressEmailaddress',\n 'IcemacAddressbookAddressHomepageaddress',\n 'IcemacAddressbookFileFileFile',\n 'IcemacAddressbookKeywordKeyword',\n ] == list(iter(entityOrder)))", "def _check_dependency(self, tcname, depends):\r\n tcindex = self.tcorder.index(tcname)\r\n dependsindex = []\r\n for i in depends:\r\n dependsindex.append(self.tcorder.index(i))\r\n for i in dependsindex:\r\n if tcindex < i:\r\n msg = \"%s must be ordered after %s\\n\" %(tcname, self.tcorder[i])\r\n self.dependency.append(msg)", "def cross_validate(self, contents, required=None, forbidden=None):\n if required:\n for item in required:\n self.assertTrue(\n item in contents,\n \"Required entry [{item}] not found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )\n if forbidden:\n for item in forbidden:\n self.assertTrue(\n item not in contents,\n \"Forbidden entry [{item}] found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )", "def test_dates_must_be_in_order_ended(make_one):\n w = make_one(\"TEST-1\")\n w.queued_at = 
datetime.datetime.now() - datetime.timedelta(days=14)\n w.ended_at = datetime.datetime.now() - datetime.timedelta(days=20)\n\n with pytest.raises(ValidationError):\n w.check_dates()", "def test_c(self):\n v1 = versions.Version(version='1.2.0', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 < v2)\n self.assertTrue(v2 < v1)", "def test_open_ru_ballance_after_make_position(self, ):\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n accs = self.model.list_view_accounts().fetchall()\n self.assertEqual(1, len(accs))\n before = accs[0]['current_money']\n self.model.tamake_positions_for_whole_account(aid)\n accs = self.model.list_view_accounts().fetchall()\n after = accs[0]['current_money']\n self.assertAlmostEqual(before, after)", "def test_validate_wc3(self):\r\n assert self.wc2_tree != 0", "def checks(self, error_margin=0.1):\n\n # Check all compartments are positive\n for label in self.labels:\n assert self.compartments[label] >= 0.", "def test_create_confirm_order_details(self):\n pass", "def test_place_multiple_orders(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(len(self.orders_list.orders_list), 3)\n self.assertEqual(self.orders_list.orders_list[2].order_id, 2)", "def test_05_purchase_order(self):\n # Create purchase Order and check purchase order was created correctly\n # (without lines)\n order = self.create_po()\n self.assertTrue(order)\n self.assertFalse(order.order_line)\n\n # Add one sellable line (first line)\n sellable_product = self.product_obj.browse(self.sellable_product)\n self.create_pol(order, sellable_product)\n self.assertTrue(order.order_line)\n self.assertEquals(len(order.order_line), 1)\n self.assertIn(sellable_product, order.order_line.mapped('product_id'))\n self.assertEquals(order.order_line.product_id.state2, 'sellable')\n\n # Add one draft line (second line)\n draft_product = self.product_obj.browse(self.draft_product)\n self.create_pol(order, draft_product)\n self.assertEquals(len(order.order_line), 2)\n self.assertIn(draft_product, order.order_line.mapped('product_id'))\n self.assertEquals(set(order.order_line.mapped('product_id.state2')),\n set(['sellable', 'draft']))\n\n # Add one obsolete line. This will raise an exception.\n obsolete_product = self.product_obj.browse(self.obsolete_product)\n with self.assertRaises(exceptions.Warning):\n self.create_pol(order, obsolete_product)", "def test_basic_end(self):\n self.assertLines(\n [\"-E\", \"2\", \"examples/dummy4.csv\"], [\"a,b,c\", \"1,2,3\", \"4,5,6\",]\n )", "def testCC(self):\n self.assertEqual(\n self.cc,\n self.cd.cc\n )", "def test_create_course_case_change(self):\r\n self.course_data['number'] = 'capital'\r\n self.client.ajax_post('/course/', self.course_data)\r\n cache_current = self.course_data['org']\r\n self.course_data['org'] = self.course_data['org'].lower()\r\n self.assert_course_creation_failed('There is already a course defined with the same organization, course number, and course run. Please change either organization or course number to be unique.')\r\n self.course_data['org'] = cache_current\r\n\r\n self.client.ajax_post('/course/', self.course_data)\r\n cache_current = self.course_data['number']\r\n self.course_data['number'] = self.course_data['number'].upper()\r\n self.assert_course_creation_failed('There is already a course defined with the same organization, course number, and course run. 
Please change either organization or course number to be unique.')", "def test_pre_order_0_3(bst_right_balance):\n assert tuple(bst_right_balance.pre_order()) == (6, 5, 2, 8, 7, 9)", "def test_case_customer_part_orders(self):\n pass", "def test_check_validity(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n game.solve()\n assert game.check_validity()", "def test_invalid_sequence(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move sequence\n move_seq = [0, 5, 6, 6, 3, 2]\n # check sequence validity\n (cost, valid, endloc) = k1.validate_sequence(move_seq)\n self.assertFalse(valid)\n self.assertEqual(cost, 0)", "def test_ordering_create(self):\n test_questionnaire = Questionnaire.objects.create(title='test_ordering_1')\n Question.objects.create(\n questionnaire=test_questionnaire,\n question_type='boolean',\n question_text='question 1',\n )\n Question.objects.create(\n questionnaire=test_questionnaire,\n question_type='boolean',\n question_text='question 2',\n )\n\n self.assertEqual(test_questionnaire.questions.get(question_text='question 1').order, 0)\n self.assertEqual(test_questionnaire.questions.get(question_text='question 2').order, 1)", "def test_c(self):\n v1 = versions.Version(version='1.2.0', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertTrue(v1 > v2)\n self.assertFalse(v2 > v1)", "def test_court_order_file(session, minio_server, test_name, expected_code, expected_msg):\n business = factory_business('BC1234567')\n filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE)\n\n if test_name == 'SUCCESS':\n filing['filing']['courtOrder']['fileKey'] = _upload_file(letter, invalid=False)\n elif test_name == 'FAIL_INVALID_FILE_KEY_SIZE':\n filing['filing']['courtOrder']['fileKey'] = _upload_file(letter, invalid=True)\n\n err = validate(business, filing)\n\n if expected_code:\n assert err.code == expected_code\n assert lists_are_equal(err.msg, expected_msg)\n else:\n assert err is None", "def test_program_course_auto_position():\n first_course = CourseFactory.create(position_in_program=None)\n assert first_course.position_in_program == 1\n second_course = CourseFactory.create(\n program=first_course.program, position_in_program=None\n )\n assert second_course.position_in_program == 2", "def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.schedule(student_id)\n self.assertFalse(result)", "def test_pre_order_0_4(bst_wiki):\n assert tuple(bst_wiki.pre_order()) == (7, 4, 2, 1, 3, 6, 5, 9, 8)", "def test_validate_crown(session, desc, valid, message_content):\n # setup\n for reg_type in CrownChargeTypes:\n if validator.validate_allowed_type(reg_type.value) != '':\n continue\n\n json_data = copy.deepcopy(FINANCING)\n json_data['type'] = reg_type.value\n del json_data['trustIndenture']\n if reg_type.value == 'OT' and desc != DESC_MISSING_OT_DESC:\n json_data['otherTypeDescription'] = 'TEST OTHER DESC'\n message_content = None\n elif reg_type.value != 'OT' and desc == DESC_INCLUDES_OT_DESC:\n json_data['otherTypeDescription'] = 'TEST OTHER DESC'\n elif desc == DESC_MISSING_OT_DESC or desc == DESC_INCLUDES_OT_DESC:\n message_content = None\n if desc != DESC_EXCLUDES_LY:\n del json_data['lifeYears']\n if desc != DESC_INFINITY_INVALID:\n json_data['lifeInfinite'] = True\n else:\n json_data['lifeInfinite'] = False\n if desc == 
DESC_MISSING_GC:\n del json_data['generalCollateral']\n if desc != DESC_INCLUDES_VC:\n del json_data['vehicleCollateral']\n\n # print('REG TYPE: ' + str(json_data['type']))\n error_msg = validator.validate(json_data)\n if valid:\n assert error_msg == ''\n elif message_content:\n # print(error_msg)\n assert error_msg != ''\n assert error_msg.find(message_content) != -1", "def test_process_order(self):\n expected_contents = self.fill_session_cart()\n\n response = self.client.post(\n self.CHECKOUT_URL, self.build_checkout_form())\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your order was placed.\")\n\n placed_order = OrderInfo.objects.get()\n order_contents = placed_order.ordercontents_set.all()\n # arbitrary 5 seconds to account for some fault\n self.assertTrue(\n timezone.now() - placed_order.ordered < timedelta(seconds=5))\n self.assertEqual(len(expected_contents), len(order_contents))\n for expected in expected_contents:\n db_contents = order_contents.get(menu_item__id=expected['id'])\n dict_from_db = {\n 'id': db_contents.menu_item.id,\n 'name': db_contents.menu_item.name,\n 'price': db_contents.menu_item.price,\n 'amount': db_contents.amount,\n 'cost': db_contents.cost,\n }\n self.assertEqual(expected, dict_from_db)", "def test_courseware_nav(self):\r\n # Navigate to the courseware page from the info page\r\n self.course_info_page.visit()\r\n self.tab_nav.go_to_tab('Courseware')\r\n\r\n # Check that the courseware navigation appears correctly\r\n EXPECTED_SECTIONS = {\r\n 'Test Section': ['Test Subsection'],\r\n 'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']\r\n }\r\n\r\n actual_sections = self.course_nav.sections\r\n for section, subsections in EXPECTED_SECTIONS.iteritems():\r\n self.assertIn(section, actual_sections)\r\n self.assertEqual(actual_sections[section], EXPECTED_SECTIONS[section])\r\n\r\n # Navigate to a particular section\r\n self.course_nav.go_to_section('Test Section', 'Test Subsection')\r\n\r\n # Check the sequence items\r\n EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']\r\n\r\n actual_items = self.course_nav.sequence_items\r\n self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))\r\n for expected in EXPECTED_ITEMS:\r\n self.assertIn(expected, actual_items)", "def test_ordinal_third(self):\n with open(\"tests/data_files/presidents_day_dates.txt\", \"r\") as dates_file:\n dates_strings_list = dates_file.read().splitlines()\n\n for date_string in dates_strings_list:\n test_date = date(*[int(p) for p in date_string.split(\"-\")])\n presidents_day = get_by_values(Ordinal.third, Weekday.Monday, Month.February, test_date.year)\n\n self.assertEquals(test_date, presidents_day)", "def test_entities__Entity__getFieldOrder__1(entity):\n assert [] == entity.getFieldOrder()", "def test_kyc_post_legal(self):\n pass", "def test_cash(currency, expected):\n cash = Cash(currency)\n\n assert cash.cash_like\n assert cash.currency == expected", "def test_create422_below_min_quantity(self):\n card_order_editable = CardOrderEditable(\n quantity = 1\n )\n with self.assertRaises(Exception) as context:\n self.api.create(self.card, self.card_order_editable)\n self.assertTrue(\"Number of cards in order must be at least\" in context.exception.__str__())", "def test_card_sort_ace_greater_than_king():\n spade_ace_card = Card(Suit.SPADE, 1)\n spade_king_card = Card(Suit.SPADE, 13)\n assert spade_ace_card > spade_king_card", "def test_sad_purchasePlaces_negative(self):\n\n num_places = -1\n for competition in self.competitions:\n 
rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": num_places,\n \"club\": self.clubs[0][\"name\"],\n \"competition\": competition[\"name\"],\n },\n )\n\n assert rv.status_code in [400]\n assert b\"Something went wrong-please try again\" in rv.data", "def test_create_course_duplicate_number(self):\r\n self.client.ajax_post('/course/', self.course_data)\r\n self.course_data['display_name'] = 'Robot Super Course Two'\r\n self.course_data['run'] = '2013_Summer'\r\n\r\n self.assert_course_creation_failed('There is already a course defined with the same organization, course number, and course run. Please change either organization or course number to be unique.')", "def testCC(self):\n self.assertEqual(\n self.cc,\n self.ccr.cc\n )\n\n self.assertEqual(\n None,\n self.ccr_bad.cc\n )", "def test_assert_truth(self):\n\n # Confused? This video should help:\n #\n # http://bit.ly/about_asserts\n\n self.assertTrue(True) # This should be true", "def test_num_reac(self):\n self.assertEqual(self.Nreac, 1)", "def test_acknowledge_orders(self):\n pass", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def test_items_in_order(self):\n t1 = datetime.now()\n t2 = t1 + timedelta(seconds=1)\n s1 = ListTimeSeries([[t1, 123]])\n s2 = ListTimeSeries([[t2, 456]])\n c = CompositeTimeSeries(\"abc\", [s1, s2])\n rows = [r for r in c.rows()]\n assert_that(rows, is_(equal_to([\n [t1, 123], [t2, 456]\n ])))", "def test_validate_valid_org(self):\r\n assert self.org_tree != 0", "def test_ordering(self):\r\n def verify_order(source_usage_key, parent_usage_key, source_position=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n parent = self.get_item_from_modulestore(parent_usage_key)\r\n children = parent.children\r\n if source_position is None:\r\n self.assertFalse(source_usage_key in children, 'source item not expected in children array')\r\n self.assertEqual(\r\n children[len(children) - 1],\r\n usage_key,\r\n \"duplicated item not at end\"\r\n )\r\n else:\r\n self.assertEqual(\r\n children[source_position],\r\n source_usage_key,\r\n \"source item at wrong position\"\r\n )\r\n self.assertEqual(\r\n children[source_position + 1],\r\n usage_key,\r\n \"duplicated item not ordered after source item\"\r\n )\r\n\r\n verify_order(self.problem_usage_key, self.seq_usage_key, 0)\r\n # 2 because duplicate of problem should be located before.\r\n verify_order(self.html_usage_key, self.seq_usage_key, 2)\r\n verify_order(self.seq_usage_key, self.chapter_usage_key, 0)\r\n\r\n # Test duplicating something into a location that is not the parent of the original item.\r\n # Duplicated item should appear at the end.\r\n verify_order(self.html_usage_key, self.usage_key)", "def test_get_order(self):\n pass", "def testSortedNotes(self):\n for simple_score in self.simple_scores.values():\n notes = simple_score.sorted_notes\n assert all(notes[i].start_time <= notes[i + 1].start_time\n for i in range(len(notes) - 1))", "def _validate_order(self, order, account, quotation, strategy_name):\n if quotation.volume < order.volume:\n print('Limit volume bardata volume:%d, untraded volume:%d, date:%s' % \n (quotation.volume, order.volume, quotation.date))\n return False\n\n # Calculate this order's 
margin\n if order.offset == 'open': \n posid = order.instrument+'-'+order.direction\n margin_ratio = account.portfolios[strategy_name].positions[posid].margin_ratio = quotation.margin_ratio\n multiplier = account.portfolios[strategy_name].positions[posid].multiplier = quotation.multiplier\n margin = order.price * order.volume * margin_ratio * multiplier\n if margin > account.available:\n print('Lack of capital available:%f, margin:%f' % (account.available, margin))\n return False\n return True\n elif order.offset == 'close':\n if order.direction == 'long':\n posid = order.instrument+'-'+'short'\n elif order.direction == 'short':\n posid = order.instrument+'-'+'long'\n hold_posi_quantity = account.portfolios[strategy_name].positions[posid].total_position\n try:\n assert order.volume > 0, 'close volume need greater than zero %d'%(order.volume)\n assert order.volume <= hold_posi_quantity, 'close volume need lower than hold posi quantity %d:%d'%(order.volume,\n hold_posi_quantity)\n except AssertionError as e:\n #print(e)\n return False\n return True\n elif order.offset == 'closetoday':\n order.direction = 'long' if order.direction == 'short' else 'short'\n posid = order.instrument+'-'+order.direction\n td_hold_posi_quantity = account.portfolios[strategy_name].positions[posid].today_position\n try:\n assert order.volume < td_hold_posi_quantity, 'close today position need lower than today hold position'\n except AssertionError as e:\n print(e)\n return False\n return True\n else:\n return False", "def test_contracts(self):\n correct_contracts = [factories.ProjectContract(projects=self.projects,\n status=ProjectContract.STATUS_CURRENT) for i in range(3)]\n response = self._get()\n self.assertEqual(response.status_code, 200)\n contracts = response.context['contracts']\n self.assertEqual(len(contracts), 3)\n for i in range(3):\n self.assertTrue(correct_contracts[i] in contracts)", "def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)", "def test_entities__EntityOrder__up__3(entityOrder):\n person = IEntity(IPerson)\n assert 1 == entityOrder.get(person)\n with pytest.raises(ValueError):\n entityOrder.up(person, 2)", "def test_valid_sequence(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move sequence\n move_seq = [0, 5, 6, 3, 2]\n # check sequence validity\n (cost, valid, endloc) = k1.validate_sequence(move_seq)\n self.assertTrue(valid)\n self.assertEqual(cost, len(move_seq))\n self.assertTrue((k1.position == start).all())\n # change the board layout to reflect the move\n for each in move_seq:\n k1.execute_move(each)\n self.assertTrue((k1.position == np.array((2, 3), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . x x S . .\n . . . K . x x S\n . . . S x S x x\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . 
.\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def test_successful_add_instructor():\n assert add_instructor('mary', 'jones', 'instructor')", "def test_invalid_grid_values_non_ascending(self):\n self.assertRaises(ValueError, splines.Spline.__init__, splines.Spline,\n np.array([0.1, 0.5, 0.4]), np.array([1, 2, 3, 4]))", "def test_special_case(self):\n cases = [\n ('3467875434578764345789654', False),\n ('AAAAAAAAAAA', False),\n ('', False),\n ]\n for titulo_eleitoral, is_valid in cases:\n self.assertEqual(self.titulo_eleitoral.validate(titulo_eleitoral), is_valid)", "def testin_order_0_4(bst_wiki):\n assert tuple(bst_wiki.in_order()) == (1, 2, 3, 4, 5, 6, 7, 8, 9)", "def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)", "def test_token_order(self):\n tokens = [Token(1), Token(2), Token(3), Token(4)]\n tokens_equal = [Token(1), Token(1)]\n self._check_sequence_consistency(tokens)\n self._check_sequence_consistency(tokens_equal, equal=True)", "def _perform_assertion(self,\n balance_orig: list,\n balance_exp: list):\n\n results_annual = cpfhelpers.calc_annual_change(\n self.salary * 12,\n self.bonus,\n self.dob,\n balance_orig[0],\n balance_orig[1],\n balance_orig[2],\n account_deltas={},\n date_start=self.date_start)\n\n assert str(round(balance_exp[0], 2)) == results_annual[strings.OA]\n assert str(round(balance_exp[1], 2)) == results_annual[strings.SA]\n assert str(round(balance_exp[2], 2)) == results_annual[strings.MA]", "def check_item_order(context, container, position, item, attribute, value):\n index = dict(first=0, last=-1)[position]\n assert_equal(value, list(get_container(context, container)[f\"{item}s\"].values())[index][attribute])", "def _validate_positions():\n positions = set([field[\"position\"] for field in fields])\n if len(positions) != len(fields):\n raise IncorrectPredictionsTableOrder", "def assert_created_course(self, number_suffix=None):\r\n test_course_data = {}\r\n test_course_data.update(self.course_data)\r\n if number_suffix:\r\n test_course_data['number'] = '{0}_{1}'.format(test_course_data['number'], number_suffix)\r\n course_key = _get_course_id(test_course_data)\r\n _create_course(self, course_key, test_course_data)\r\n # Verify that the creator is now registered in the course.\r\n self.assertTrue(CourseEnrollment.is_enrolled(self.user, course_key))\r\n return test_course_data", "def verify_that_the_acl_was_not_set_to_rtacltest3(driver):\n assert wait_on_element(driver, 5, f'//div[contains(text(),\"rt-acl-test-1\")]//button', 'clickable')\n driver.find_element_by_xpath(f'//div[contains(text(),\"rt-acl-test-1\")]//button').click()\n time.sleep(3)\n assert wait_on_element(driver, 5, f'//tr[contains(.,\"rt-acl-test-3\")]//mat-icon[text()=\"more_vert\"]', 'clickable')\n driver.find_element_by_xpath(f'//tr[contains(.,\"rt-acl-test-3\")]//mat-icon[text()=\"more_vert\"]').click()\n time.sleep(1)\n assert wait_on_element(driver, 5, '//button[normalize-space(text())=\"View Permissions\"]')\n driver.find_element_by_xpath('//button[normalize-space(text())=\"View Permissions\"]').click()\n assert wait_on_element(driver, 5, '//div[contains(text(),\"User - games\")]') is False", "def test_proper(self):\n\n self.assertTrue(self.cs.isProper)\n self.assertFalse(self.cs.isDegenerate)", "def test_buyTicket_Valid_Paramaters():\n old_venue_balance = testVenue.wallet\n assert testUser3.buyTicket(testTicket3)\n assert testTicket3 in testUser3.inventory\n assert 
not testTicket3.for_sale\n assert testUser3.wallet == 950\n assert testVenue.wallet == old_venue_balance + testTicket3.list_price", "def test_dates_must_be_in_order_ended_before_started(make_one):\n w = make_one(\"TEST-1\")\n w.queued_at = datetime.datetime.now() - datetime.timedelta(days=14)\n w.started_at = datetime.datetime.now() - datetime.timedelta(days=10)\n w.ended_at = datetime.datetime.now() - datetime.timedelta(days=11)\n\n with pytest.raises(ValidationError):\n w.check_dates()", "def test_bom(self):\n self.validate_goal_for('game-20110901-055435-5a8e3666.html',\n u'Squiddy',\n 'BOM')", "def test_check_category_input_3(self):\n choices = [(1, 'choice 1'), (2, 'choice 2')]\n assert validation.check_category_input(2, choices) == True", "def test_order_warnings(self):\n\n o1 = OrderTest.create_order_1()\n\n # make sure there are no warnings now, one clean order\n self.assertEqual(len(o1.warnings()), 0)\n\n o2 = OrderTest.create_order_2(o1.inmate)\n\n # make sure there is a prior-order warning\n self.assertTrue(\"Patron received an order less than 3 months ago\" in o2.warnings())\n\n # make sure there's a prior-book warning\n self.assertTrue(True in [\"Patron already received\" in warning for warning in o2.warnings()])\n self.assertFalse(True in [\"blah blah blah this isn't a warning\" in warning for warning in o2.warnings()])\n\n # make sure we haven't triggered the same-book warning\n self.assertFalse(True in [\"Two books in this\" in warning for warning in o2.warnings()])\n\n # Add another book\n b3 = models.Book()\n b3.order = o2\n b3.title = \"dictionary\"\n b3.full_clean()\n b3.save()\n\n # ...and test if it triggers the same-book warning\n self.assertTrue(True in [\"Two books in this\" in warning for warning in o2.warnings()])", "def test_valid_general_collateral():\n is_valid, errors = validate(GENERAL_COLLATERAL, 'generalCollateral', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert is_valid" ]
[ "0.6418377", "0.6131915", "0.5973272", "0.59159654", "0.5835521", "0.5678925", "0.56525654", "0.5622847", "0.55975693", "0.55640495", "0.55434185", "0.5528798", "0.55012167", "0.54883766", "0.54165864", "0.539526", "0.53826475", "0.5382484", "0.5381499", "0.5373344", "0.5369267", "0.53634447", "0.53475976", "0.53415257", "0.5331142", "0.53071415", "0.5304982", "0.52964115", "0.52892596", "0.528757", "0.5283849", "0.52786005", "0.5273312", "0.52682364", "0.52681416", "0.52654225", "0.5254218", "0.5246269", "0.5228364", "0.522566", "0.52162325", "0.52015185", "0.51986295", "0.5192964", "0.51903456", "0.51850194", "0.51780653", "0.5176057", "0.51694614", "0.5159642", "0.5157856", "0.5157655", "0.51499826", "0.5145449", "0.5132209", "0.5130731", "0.5128544", "0.512539", "0.51246595", "0.51242906", "0.51204294", "0.5119218", "0.51187223", "0.5112361", "0.5111356", "0.51104945", "0.51104623", "0.5107015", "0.5097157", "0.50949216", "0.5094176", "0.5093978", "0.509298", "0.50906706", "0.508699", "0.50768596", "0.5074144", "0.5068544", "0.50680876", "0.5063735", "0.5061489", "0.50594467", "0.50592154", "0.50575936", "0.5056615", "0.50565016", "0.50558513", "0.5051773", "0.5050219", "0.50486666", "0.5045339", "0.5044879", "0.50408393", "0.5039757", "0.5039538", "0.5038809", "0.50377804", "0.50336677", "0.5031801", "0.5026059" ]
document_score: 0.67318577
document_rank: 0
Assert valid court order.
def test_court_order_file(session, minio_server, test_name, expected_code, expected_msg): business = factory_business('BC1234567') filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE) if test_name == 'SUCCESS': filing['filing']['courtOrder']['fileKey'] = _upload_file(letter, invalid=False) elif test_name == 'FAIL_INVALID_FILE_KEY_SIZE': filing['filing']['courtOrder']['fileKey'] = _upload_file(letter, invalid=True) err = validate(business, filing) if expected_code: assert err.code == expected_code assert lists_are_equal(err.msg, expected_msg) else: assert err is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_court_orders(session, test_status, expected_code, expected_msg):\n business = factory_business('BC1234567')\n filing = copy.deepcopy(COURT_ORDER_FILING_TEMPLATE)\n del filing['filing']['courtOrder']['fileKey']\n if test_status == 'FAIL':\n del filing['filing']['courtOrder']['orderDetails']\n filing['filing']['courtOrder']['effectOfOrder'] = 'invalid'\n err = validate(business, filing)\n\n if expected_code:\n assert err.code == expected_code\n assert lists_are_equal(err.msg, expected_msg)\n else:\n assert err is None", "def test_case_customer_complete_courseevent_order(self):", "def test_order_constraint(self):\n orders_placed = [25, 25, 25]\n with self.assertRaises(Exception):\n analyse_uncertain_demand.UncertainDemand(\n orders=orders_placed,\n sku='Rx493-90',\n lead_time=Decimal(4),\n unit_cost=Decimal(40),\n reorder_cost=Decimal(400),\n retail_price=Decimal(600),\n currency='USD'\n )", "def test_validate_good_order(self):\n for proj in testorders.good_test_projections:\n valid_order = copy.deepcopy(self.base_order)\n valid_order['projection'] = {proj: testorders.good_test_projections[proj]}\n\n try:\n good = api.validation(valid_order, self.staffuser.username)\n except ValidationException as e:\n self.fail('Raised ValidationException: {}'.format(e.message))", "def test_validate_valid_crisis(self):\r\n assert self.crisis_tree != 0", "def verify_courses(self, courses):\n assert len(courses) == 1\n self.verify_course(courses[0])", "def test_order(self):\n\n # issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def test_validate_bad_orders(self):\n exc_type = ValidationException\n invalid_order = copy.deepcopy(self.base_order)\n c = 0 # For initial debugging\n\n for proj in testorders.good_test_projections:\n invalid_order['projection'] = {proj: testorders.good_test_projections[proj]}\n\n invalid_list = testorders.InvalidOrders(invalid_order, self.base_schema, abbreviated=True)\n\n for order, test, exc in invalid_list:\n # issues getting assertRaisesRegExp to work correctly\n with self.assertRaises(exc_type):\n try:\n c += 1\n api.validation(order, self.staffuser.username)\n except exc_type as e:\n if str(exc) in str(e):\n raise\n else:\n self.fail('\\n\\nExpected in exception message:\\n{}'\n '\\n\\nException message raised:\\n{}'\n '\\n\\nUsing test {}'.format(str(exc), str(e), test))\n else:\n self.fail('\\n{} Exception was not raised\\n'\n '\\nExpected exception message:\\n{}\\n'\n '\\nUsing test: {}'.format(exc_type, str(exc), test))\n #print c # For initial debugging", "def test_02_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.test_01_lunch_order()\r\n #We have a confirmed order with its associate cashmove\r\n #We execute the cancel function\r\n self.order_one.cancel()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #We check that the state is cancelled and that the cashmove has been deleted\r\n self.assertEqual(self.order_one.state,'cancelled')\r\n 
self.assertFalse(self.order_one.cashmove)", "def test_required_properties_order() -> None:\n soup = generate_case(\"required_properties_order\")\n\n tests.html_schema_doc_asserts.assert_undocumented_required(soup, [\"a\", \"b\", \"b\", \"a\"])", "def test_headlines_order(self) -> None:\n last: Tuple[int, str] = (0, \"\")\n\n for headline in self.report.headlines:\n rule: Optional[HeadlineRules] = self.rules.get_headline_rules(headline.name)\n if (not rule) or (rule.order is None):\n continue\n\n last_order, last_headline = last # type: int, str\n if last_order > rule.order:\n self.add_error(\n (\n f\"Rubriken {headline.name} ska komma före \"\n f\"rubriken {last_headline}.\"\n ),\n headline=headline,\n )\n\n last = (rule.order, headline.name)", "def test_create_course(self):\r\n self.assert_created_course()", "def test_entities__EntityOrder__1():\n zope.interface.verify.verifyObject(IEntityOrder, EntityOrder())", "def final_check(self, test_collection):\n assert True", "def testSortOrder(self):\n timestamp = time.time()\n comment_id1 = Comment.ConstructCommentId(timestamp, 0, 0)\n comment_id2 = Comment.ConstructCommentId(timestamp + 1, 0, 0)\n self.assertGreater(comment_id2, comment_id1)", "def _check_course(self, source_course_loc, dest_course_loc, expected_blocks, unexpected_blocks):\r\n history_info = modulestore().get_course_history_info(dest_course_loc)\r\n self.assertEqual(history_info['edited_by'], self.user)\r\n for expected in expected_blocks:\r\n # since block_type has no impact on identity, we can just provide an empty string\r\n source = modulestore().get_item(source_course_loc.make_usage_key(\"\", expected))\r\n pub_copy = modulestore().get_item(dest_course_loc.make_usage_key(\"\", expected))\r\n # everything except previous_version & children should be the same\r\n self.assertEqual(source.category, pub_copy.category)\r\n self.assertEqual(source.update_version, pub_copy.update_version)\r\n self.assertEqual(\r\n self.user, pub_copy.edited_by,\r\n \"{} edited_by {} not {}\".format(pub_copy.location, pub_copy.edited_by, self.user)\r\n )\r\n for field in source.fields.values():\r\n if field.name == 'children':\r\n self._compare_children(field.read_from(source), field.read_from(pub_copy), unexpected_blocks)\r\n else:\r\n self.assertEqual(field.read_from(source), field.read_from(pub_copy))\r\n for unexp in unexpected_blocks:\r\n with self.assertRaises(ItemNotFoundError):\r\n modulestore().get_item(dest_course_loc.make_usage_key(\"\", unexp))", "def assertKeys(self, data, expected):\r\n self.assertEqual(sorted(data.keys()), sorted(expected))", "def assert_response_orders(self, *args, **kwargs):\n self.assert_response_order(*args, **kwargs)\n kwargs['order_by'] = '-' + kwargs['order_by']\n self.assert_response_order(*args, **kwargs)", "def verify_course(self, course, course_id='edX/toy/2012_Fall'):\n assert course_id == str(course.id)", "def test_lpdaac_good(self):\n self.assertIsNone(api.inventory.check(self.lpdaac_order_good))", "def test_01_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.test_00_lunch_order()\r\n #We receive the order so we confirm the order line so it's state will be 'confirmed'\r\n #A cashmove will be created and we will test that the cashmove amount equals the order line price\r\n self.order_one.confirm()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #we check that our order_line is a 'confirmed' one and that there are a cashmove linked to that order_line with an amount equals to the order line 
price:\r\n self.assertEqual(self.order_one.state,'confirmed')\r\n self.assertTrue(self.order_one.cashmove)\r\n self.assertTrue(self.order_one.cashmove[0].amount==-self.order_one.price)", "def test_c(self):\n v1 = versions.Version(version='1.2', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertFalse(v1 >= v2)\n self.assertTrue(v2 >= v1)", "def course_tester(courses):\n\n return False", "def _verify_published_course(courses_published):\r\n self.assertEqual(len(courses_published), 1, len(courses_published))\r\n course = self.findByIdInResult(courses_published, \"head23456\")\r\n self.assertIsNotNone(course, \"published courses\")\r\n self.assertEqual(course.location.course_key.org, \"testx\")\r\n self.assertEqual(course.location.course_key.offering, \"wonderful\")\r\n self.assertEqual(course.category, 'course', 'wrong category')\r\n self.assertEqual(len(course.tabs), 4, \"wrong number of tabs\")\r\n self.assertEqual(course.display_name, \"The most wonderful course\",\r\n course.display_name)\r\n self.assertIsNone(course.advertised_start)\r\n self.assertEqual(len(course.children), 0,\r\n \"children\")", "def test_payment_accepted_order(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.first_name,\r\n 'billTo_lastName': u\"\\u2603\",\r\n 'orderNumber': str(order1.id),\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n 'ccAuthReply_amount': '0.00'\r\n }\r\n\r\n # tests for an order number that doesn't match up\r\n params_bad_ordernum = params.copy()\r\n params_bad_ordernum['orderNumber'] = str(order1.id + 10)\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params_bad_ordernum)\r\n\r\n # tests for a reply amount of the wrong type\r\n params_wrong_type_amt = params.copy()\r\n params_wrong_type_amt['ccAuthReply_amount'] = 'ab'\r\n with self.assertRaises(CCProcessorDataException):\r\n payment_accepted(params_wrong_type_amt)\r\n\r\n # tests for a reply amount of the wrong type\r\n params_wrong_amt = params.copy()\r\n params_wrong_amt['ccAuthReply_amount'] = '1.00'\r\n with self.assertRaises(CCProcessorWrongAmountException):\r\n payment_accepted(params_wrong_amt)\r\n\r\n # tests for a not accepted order\r\n params_not_accepted = params.copy()\r\n params_not_accepted['decision'] = \"REJECT\"\r\n self.assertFalse(payment_accepted(params_not_accepted)['accepted'])\r\n\r\n # finally, tests an accepted order\r\n self.assertTrue(payment_accepted(params)['accepted'])", "def test_00_lunch_order(self):\r\n cr, uid = self.cr, self.uid\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #we check that our order_line is a 'new' one and that there are no cashmove linked to that order_line:\r\n self.assertEqual(self.order_one.state,'new')\r\n self.assertEqual(list(self.order_one.cashmove), [])\r\n #we order that orderline so it's state will be 'ordered'\r\n self.order_one.order()\r\n self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)\r\n #we check that our order_line is a 'ordered' one and that there are no cashmove linked to that order_line:\r\n self.assertEqual(self.order_one.state,'ordered')\r\n self.assertEqual(list(self.order_one.cashmove), [])", "def testCourses(self):\n self.person.invokeFactory(type_name=\"FSDCourse\", id=\"test-course\")\n self.failUnless('test-course' in 
self.person.contentIds())\n self.failUnless('test-course' in [c.id for c in self.person.getCourses()])", "def test_certificate_validations():\n course_runs = CourseRunFactory.create_batch(2)\n programs = ProgramFactory.create_batch(2)\n\n course_runs[0].course.page.certificate_page.save_revision()\n course_runs[1].course.page.certificate_page.save_revision()\n\n programs[0].page.certificate_page.save_revision()\n programs[1].page.certificate_page.save_revision()\n\n course_certificate = CourseRunCertificateFactory(\n course_run=course_runs[0],\n certificate_page_revision=course_runs[\n 1\n ].course.page.certificate_page.get_latest_revision(),\n )\n program_certificate = ProgramCertificateFactory(\n program=programs[0],\n certificate_page_revision=programs[\n 1\n ].page.certificate_page.get_latest_revision(),\n )\n\n # When the revision doesn't match the courseware\n with pytest.raises(\n ValidationError,\n match=f\"The selected certificate page {course_certificate} is not for this course {course_runs[0].course}.\",\n ):\n course_certificate.clean()\n\n with pytest.raises(\n ValidationError,\n match=f\"The selected certificate page {program_certificate} is not for this program {programs[0]}.\",\n ):\n program_certificate.clean()", "def validate_testdata(self):\r\n self._get_tcorder()\r\n for line in self.data:\r\n if not line.startswith(\" \"):\r\n tcname = line.strip(\"\\n\")\r\n continue\r\n if \"[Setup]\" in line:\r\n if \"depends\" in line:\r\n line = line.strip(\"\\n\").split(\"depends\")[1][1:]\r\n depends = line.split()[0].split(',')\r\n self._check_dependency(tcname, depends)\r\n\r\n if self.dependency:\r\n msg = \"Test cases are not in proper dependency order.\\n\"\r\n for i in self.dependency:\r\n msg = msg + i\r\n logger.warn(msg, console=False)\r\n notify.message(msg)\r\n raise DependencyException(msg)\r\n else:\r\n msg = \"Testcases are in correct dependency order.\"\r\n logger.warn(msg)\r\n notify.message(msg)", "def test_init(self):\n test_order = Order(\"1\", \"Large\", \"Thin\", \"Cheese\")\n self.assertEqual(test_order.quantity, \"1\")\n self.assertEqual(test_order.size, \"Large\")\n self.assertEqual(test_order.crust, \"Thin\")\n self.assertEqual(test_order.toppings, \"Cheese\")", "def testProtractedNSESanityChecks(self):\n self.assertGreater(self.c3.get_species_richness(1), self.c2.get_species_richness(1))\n self.assertLess(self.c4.get_species_richness(1), self.c3.get_species_richness(1))", "def check_prerequisites(self):\n self.courses_not_completed = self.prerequisite_set - set(self.user_courses.keys())", "def test_c(self):\n v1 = versions.Version(version='1.2', name='foo')\n v2 = versions.Version(version='1.2.1', name='bar')\n\n self.assertTrue(v1 <= v2)\n self.assertFalse(v2 <= v1)", "def test_entities__EntityOrder____iter____1(entityOrder):\n assert ([\n 'IcemacAddressbookAddressbookAddressbook',\n 'IcemacAddressbookPersonPerson',\n 'IcemacAddressbookPersonPersondefaults',\n 'IcemacAddressbookAddressPostaladdress',\n 'IcemacAddressbookAddressPhonenumber',\n 'IcemacAddressbookAddressEmailaddress',\n 'IcemacAddressbookAddressHomepageaddress',\n 'IcemacAddressbookFileFileFile',\n 'IcemacAddressbookKeywordKeyword',\n ] == list(iter(entityOrder)))", "def _check_dependency(self, tcname, depends):\r\n tcindex = self.tcorder.index(tcname)\r\n dependsindex = []\r\n for i in depends:\r\n dependsindex.append(self.tcorder.index(i))\r\n for i in dependsindex:\r\n if tcindex < i:\r\n msg = \"%s must be ordered after %s\\n\" %(tcname, self.tcorder[i])\r\n 
self.dependency.append(msg)", "def cross_validate(self, contents, required=None, forbidden=None):\n if required:\n for item in required:\n self.assertTrue(\n item in contents,\n \"Required entry [{item}] not found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )\n if forbidden:\n for item in forbidden:\n self.assertTrue(\n item not in contents,\n \"Forbidden entry [{item}] found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )", "def test_dates_must_be_in_order_ended(make_one):\n w = make_one(\"TEST-1\")\n w.queued_at = datetime.datetime.now() - datetime.timedelta(days=14)\n w.ended_at = datetime.datetime.now() - datetime.timedelta(days=20)\n\n with pytest.raises(ValidationError):\n w.check_dates()", "def test_c(self):\n v1 = versions.Version(version='1.2.0', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertFalse(v1 < v2)\n self.assertTrue(v2 < v1)", "def test_open_ru_ballance_after_make_position(self, ):\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n accs = self.model.list_view_accounts().fetchall()\n self.assertEqual(1, len(accs))\n before = accs[0]['current_money']\n self.model.tamake_positions_for_whole_account(aid)\n accs = self.model.list_view_accounts().fetchall()\n after = accs[0]['current_money']\n self.assertAlmostEqual(before, after)", "def test_validate_wc3(self):\r\n assert self.wc2_tree != 0", "def checks(self, error_margin=0.1):\n\n # Check all compartments are positive\n for label in self.labels:\n assert self.compartments[label] >= 0.", "def test_create_confirm_order_details(self):\n pass", "def test_place_multiple_orders(self):\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.orders_list.place_order(self.order)\n self.assertEqual(len(self.orders_list.orders_list), 3)\n self.assertEqual(self.orders_list.orders_list[2].order_id, 2)", "def test_05_purchase_order(self):\n # Create purchase Order and check purchase order was created correctly\n # (without lines)\n order = self.create_po()\n self.assertTrue(order)\n self.assertFalse(order.order_line)\n\n # Add one sellable line (first line)\n sellable_product = self.product_obj.browse(self.sellable_product)\n self.create_pol(order, sellable_product)\n self.assertTrue(order.order_line)\n self.assertEquals(len(order.order_line), 1)\n self.assertIn(sellable_product, order.order_line.mapped('product_id'))\n self.assertEquals(order.order_line.product_id.state2, 'sellable')\n\n # Add one draft line (second line)\n draft_product = self.product_obj.browse(self.draft_product)\n self.create_pol(order, draft_product)\n self.assertEquals(len(order.order_line), 2)\n self.assertIn(draft_product, order.order_line.mapped('product_id'))\n self.assertEquals(set(order.order_line.mapped('product_id.state2')),\n set(['sellable', 'draft']))\n\n # Add one obsolete line. 
This will raise an exception.\n obsolete_product = self.product_obj.browse(self.obsolete_product)\n with self.assertRaises(exceptions.Warning):\n self.create_pol(order, obsolete_product)", "def test_basic_end(self):\n self.assertLines(\n [\"-E\", \"2\", \"examples/dummy4.csv\"], [\"a,b,c\", \"1,2,3\", \"4,5,6\",]\n )", "def testCC(self):\n self.assertEqual(\n self.cc,\n self.cd.cc\n )", "def test_create_course_case_change(self):\r\n self.course_data['number'] = 'capital'\r\n self.client.ajax_post('/course/', self.course_data)\r\n cache_current = self.course_data['org']\r\n self.course_data['org'] = self.course_data['org'].lower()\r\n self.assert_course_creation_failed('There is already a course defined with the same organization, course number, and course run. Please change either organization or course number to be unique.')\r\n self.course_data['org'] = cache_current\r\n\r\n self.client.ajax_post('/course/', self.course_data)\r\n cache_current = self.course_data['number']\r\n self.course_data['number'] = self.course_data['number'].upper()\r\n self.assert_course_creation_failed('There is already a course defined with the same organization, course number, and course run. Please change either organization or course number to be unique.')", "def test_pre_order_0_3(bst_right_balance):\n assert tuple(bst_right_balance.pre_order()) == (6, 5, 2, 8, 7, 9)", "def test_case_customer_part_orders(self):\n pass", "def test_check_validity(\n student_names, project_names, supervisor_names, capacities, seed, clean\n):\n\n *_, game = make_game(\n student_names, project_names, supervisor_names, capacities, seed, clean\n )\n\n game.solve()\n assert game.check_validity()", "def test_invalid_sequence(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move sequence\n move_seq = [0, 5, 6, 6, 3, 2]\n # check sequence validity\n (cost, valid, endloc) = k1.validate_sequence(move_seq)\n self.assertFalse(valid)\n self.assertEqual(cost, 0)", "def test_ordering_create(self):\n test_questionnaire = Questionnaire.objects.create(title='test_ordering_1')\n Question.objects.create(\n questionnaire=test_questionnaire,\n question_type='boolean',\n question_text='question 1',\n )\n Question.objects.create(\n questionnaire=test_questionnaire,\n question_type='boolean',\n question_text='question 2',\n )\n\n self.assertEqual(test_questionnaire.questions.get(question_text='question 1').order, 0)\n self.assertEqual(test_questionnaire.questions.get(question_text='question 2').order, 1)", "def test_c(self):\n v1 = versions.Version(version='1.2.0', name='foo')\n v2 = versions.Version(version='1.2', name='bar')\n\n self.assertTrue(v1 > v2)\n self.assertFalse(v2 > v1)", "def test_program_course_auto_position():\n first_course = CourseFactory.create(position_in_program=None)\n assert first_course.position_in_program == 1\n second_course = CourseFactory.create(\n program=first_course.program, position_in_program=None\n )\n assert second_course.position_in_program == 2", "def test_careers_invalid_student(self):\n student_id = '1234567890'\n result = self.ucuenca.schedule(student_id)\n self.assertFalse(result)", "def test_pre_order_0_4(bst_wiki):\n assert tuple(bst_wiki.pre_order()) == (7, 4, 2, 1, 3, 6, 5, 9, 8)", "def test_validate_crown(session, desc, valid, message_content):\n # setup\n for reg_type in CrownChargeTypes:\n if validator.validate_allowed_type(reg_type.value) != '':\n continue\n\n json_data = copy.deepcopy(FINANCING)\n json_data['type'] = 
reg_type.value\n del json_data['trustIndenture']\n if reg_type.value == 'OT' and desc != DESC_MISSING_OT_DESC:\n json_data['otherTypeDescription'] = 'TEST OTHER DESC'\n message_content = None\n elif reg_type.value != 'OT' and desc == DESC_INCLUDES_OT_DESC:\n json_data['otherTypeDescription'] = 'TEST OTHER DESC'\n elif desc == DESC_MISSING_OT_DESC or desc == DESC_INCLUDES_OT_DESC:\n message_content = None\n if desc != DESC_EXCLUDES_LY:\n del json_data['lifeYears']\n if desc != DESC_INFINITY_INVALID:\n json_data['lifeInfinite'] = True\n else:\n json_data['lifeInfinite'] = False\n if desc == DESC_MISSING_GC:\n del json_data['generalCollateral']\n if desc != DESC_INCLUDES_VC:\n del json_data['vehicleCollateral']\n\n # print('REG TYPE: ' + str(json_data['type']))\n error_msg = validator.validate(json_data)\n if valid:\n assert error_msg == ''\n elif message_content:\n # print(error_msg)\n assert error_msg != ''\n assert error_msg.find(message_content) != -1", "def test_process_order(self):\n expected_contents = self.fill_session_cart()\n\n response = self.client.post(\n self.CHECKOUT_URL, self.build_checkout_form())\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Your order was placed.\")\n\n placed_order = OrderInfo.objects.get()\n order_contents = placed_order.ordercontents_set.all()\n # arbitrary 5 seconds to account for some fault\n self.assertTrue(\n timezone.now() - placed_order.ordered < timedelta(seconds=5))\n self.assertEqual(len(expected_contents), len(order_contents))\n for expected in expected_contents:\n db_contents = order_contents.get(menu_item__id=expected['id'])\n dict_from_db = {\n 'id': db_contents.menu_item.id,\n 'name': db_contents.menu_item.name,\n 'price': db_contents.menu_item.price,\n 'amount': db_contents.amount,\n 'cost': db_contents.cost,\n }\n self.assertEqual(expected, dict_from_db)", "def test_courseware_nav(self):\r\n # Navigate to the courseware page from the info page\r\n self.course_info_page.visit()\r\n self.tab_nav.go_to_tab('Courseware')\r\n\r\n # Check that the courseware navigation appears correctly\r\n EXPECTED_SECTIONS = {\r\n 'Test Section': ['Test Subsection'],\r\n 'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']\r\n }\r\n\r\n actual_sections = self.course_nav.sections\r\n for section, subsections in EXPECTED_SECTIONS.iteritems():\r\n self.assertIn(section, actual_sections)\r\n self.assertEqual(actual_sections[section], EXPECTED_SECTIONS[section])\r\n\r\n # Navigate to a particular section\r\n self.course_nav.go_to_section('Test Section', 'Test Subsection')\r\n\r\n # Check the sequence items\r\n EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']\r\n\r\n actual_items = self.course_nav.sequence_items\r\n self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))\r\n for expected in EXPECTED_ITEMS:\r\n self.assertIn(expected, actual_items)", "def test_ordinal_third(self):\n with open(\"tests/data_files/presidents_day_dates.txt\", \"r\") as dates_file:\n dates_strings_list = dates_file.read().splitlines()\n\n for date_string in dates_strings_list:\n test_date = date(*[int(p) for p in date_string.split(\"-\")])\n presidents_day = get_by_values(Ordinal.third, Weekday.Monday, Month.February, test_date.year)\n\n self.assertEquals(test_date, presidents_day)", "def test_entities__Entity__getFieldOrder__1(entity):\n assert [] == entity.getFieldOrder()", "def test_kyc_post_legal(self):\n pass", "def test_cash(currency, expected):\n cash = Cash(currency)\n\n assert cash.cash_like\n assert cash.currency == 
expected", "def test_create422_below_min_quantity(self):\n card_order_editable = CardOrderEditable(\n quantity = 1\n )\n with self.assertRaises(Exception) as context:\n self.api.create(self.card, self.card_order_editable)\n self.assertTrue(\"Number of cards in order must be at least\" in context.exception.__str__())", "def test_card_sort_ace_greater_than_king():\n spade_ace_card = Card(Suit.SPADE, 1)\n spade_king_card = Card(Suit.SPADE, 13)\n assert spade_ace_card > spade_king_card", "def test_sad_purchasePlaces_negative(self):\n\n num_places = -1\n for competition in self.competitions:\n rv = self.app.post(\n \"/purchasePlaces\",\n data={\n \"places\": num_places,\n \"club\": self.clubs[0][\"name\"],\n \"competition\": competition[\"name\"],\n },\n )\n\n assert rv.status_code in [400]\n assert b\"Something went wrong-please try again\" in rv.data", "def test_create_course_duplicate_number(self):\r\n self.client.ajax_post('/course/', self.course_data)\r\n self.course_data['display_name'] = 'Robot Super Course Two'\r\n self.course_data['run'] = '2013_Summer'\r\n\r\n self.assert_course_creation_failed('There is already a course defined with the same organization, course number, and course run. Please change either organization or course number to be unique.')", "def testCC(self):\n self.assertEqual(\n self.cc,\n self.ccr.cc\n )\n\n self.assertEqual(\n None,\n self.ccr_bad.cc\n )", "def test_assert_truth(self):\n\n # Confused? This video should help:\n #\n # http://bit.ly/about_asserts\n\n self.assertTrue(True) # This should be true", "def test_num_reac(self):\n self.assertEqual(self.Nreac, 1)", "def test_acknowledge_orders(self):\n pass", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def test_items_in_order(self):\n t1 = datetime.now()\n t2 = t1 + timedelta(seconds=1)\n s1 = ListTimeSeries([[t1, 123]])\n s2 = ListTimeSeries([[t2, 456]])\n c = CompositeTimeSeries(\"abc\", [s1, s2])\n rows = [r for r in c.rows()]\n assert_that(rows, is_(equal_to([\n [t1, 123], [t2, 456]\n ])))", "def test_validate_valid_org(self):\r\n assert self.org_tree != 0", "def test_ordering(self):\r\n def verify_order(source_usage_key, parent_usage_key, source_position=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n parent = self.get_item_from_modulestore(parent_usage_key)\r\n children = parent.children\r\n if source_position is None:\r\n self.assertFalse(source_usage_key in children, 'source item not expected in children array')\r\n self.assertEqual(\r\n children[len(children) - 1],\r\n usage_key,\r\n \"duplicated item not at end\"\r\n )\r\n else:\r\n self.assertEqual(\r\n children[source_position],\r\n source_usage_key,\r\n \"source item at wrong position\"\r\n )\r\n self.assertEqual(\r\n children[source_position + 1],\r\n usage_key,\r\n \"duplicated item not ordered after source item\"\r\n )\r\n\r\n verify_order(self.problem_usage_key, self.seq_usage_key, 0)\r\n # 2 because duplicate of problem should be located before.\r\n verify_order(self.html_usage_key, self.seq_usage_key, 2)\r\n verify_order(self.seq_usage_key, self.chapter_usage_key, 0)\r\n\r\n # Test duplicating something into a location that is not the parent of the original item.\r\n # Duplicated item should appear at 
the end.\r\n verify_order(self.html_usage_key, self.usage_key)", "def test_get_order(self):\n pass", "def testSortedNotes(self):\n for simple_score in self.simple_scores.values():\n notes = simple_score.sorted_notes\n assert all(notes[i].start_time <= notes[i + 1].start_time\n for i in range(len(notes) - 1))", "def _validate_order(self, order, account, quotation, strategy_name):\n if quotation.volume < order.volume:\n print('Limit volume bardata volume:%d, untraded volume:%d, date:%s' % \n (quotation.volume, order.volume, quotation.date))\n return False\n\n # Calculate this order's margin\n if order.offset == 'open': \n posid = order.instrument+'-'+order.direction\n margin_ratio = account.portfolios[strategy_name].positions[posid].margin_ratio = quotation.margin_ratio\n multiplier = account.portfolios[strategy_name].positions[posid].multiplier = quotation.multiplier\n margin = order.price * order.volume * margin_ratio * multiplier\n if margin > account.available:\n print('Lack of capital available:%f, margin:%f' % (account.available, margin))\n return False\n return True\n elif order.offset == 'close':\n if order.direction == 'long':\n posid = order.instrument+'-'+'short'\n elif order.direction == 'short':\n posid = order.instrument+'-'+'long'\n hold_posi_quantity = account.portfolios[strategy_name].positions[posid].total_position\n try:\n assert order.volume > 0, 'close volume need greater than zero %d'%(order.volume)\n assert order.volume <= hold_posi_quantity, 'close volume need lower than hold posi quantity %d:%d'%(order.volume,\n hold_posi_quantity)\n except AssertionError as e:\n #print(e)\n return False\n return True\n elif order.offset == 'closetoday':\n order.direction = 'long' if order.direction == 'short' else 'short'\n posid = order.instrument+'-'+order.direction\n td_hold_posi_quantity = account.portfolios[strategy_name].positions[posid].today_position\n try:\n assert order.volume < td_hold_posi_quantity, 'close today position need lower than today hold position'\n except AssertionError as e:\n print(e)\n return False\n return True\n else:\n return False", "def test_contracts(self):\n correct_contracts = [factories.ProjectContract(projects=self.projects,\n status=ProjectContract.STATUS_CURRENT) for i in range(3)]\n response = self._get()\n self.assertEqual(response.status_code, 200)\n contracts = response.context['contracts']\n self.assertEqual(len(contracts), 3)\n for i in range(3):\n self.assertTrue(correct_contracts[i] in contracts)", "def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)", "def test_entities__EntityOrder__up__3(entityOrder):\n person = IEntity(IPerson)\n assert 1 == entityOrder.get(person)\n with pytest.raises(ValueError):\n entityOrder.up(person, 2)", "def test_valid_sequence(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move sequence\n move_seq = [0, 5, 6, 3, 2]\n # check sequence validity\n (cost, valid, endloc) = k1.validate_sequence(move_seq)\n self.assertTrue(valid)\n self.assertEqual(cost, len(move_seq))\n self.assertTrue((k1.position == start).all())\n # change the board layout to reflect the move\n for each in move_seq:\n k1.execute_move(each)\n self.assertTrue((k1.position == np.array((2, 3), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . x x S . .\n . . . K . 
x x S\n . . . S x S x x\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def test_successful_add_instructor():\n assert add_instructor('mary', 'jones', 'instructor')", "def test_invalid_grid_values_non_ascending(self):\n self.assertRaises(ValueError, splines.Spline.__init__, splines.Spline,\n np.array([0.1, 0.5, 0.4]), np.array([1, 2, 3, 4]))", "def test_special_case(self):\n cases = [\n ('3467875434578764345789654', False),\n ('AAAAAAAAAAA', False),\n ('', False),\n ]\n for titulo_eleitoral, is_valid in cases:\n self.assertEqual(self.titulo_eleitoral.validate(titulo_eleitoral), is_valid)", "def testin_order_0_4(bst_wiki):\n assert tuple(bst_wiki.in_order()) == (1, 2, 3, 4, 5, 6, 7, 8, 9)", "def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)", "def test_token_order(self):\n tokens = [Token(1), Token(2), Token(3), Token(4)]\n tokens_equal = [Token(1), Token(1)]\n self._check_sequence_consistency(tokens)\n self._check_sequence_consistency(tokens_equal, equal=True)", "def _perform_assertion(self,\n balance_orig: list,\n balance_exp: list):\n\n results_annual = cpfhelpers.calc_annual_change(\n self.salary * 12,\n self.bonus,\n self.dob,\n balance_orig[0],\n balance_orig[1],\n balance_orig[2],\n account_deltas={},\n date_start=self.date_start)\n\n assert str(round(balance_exp[0], 2)) == results_annual[strings.OA]\n assert str(round(balance_exp[1], 2)) == results_annual[strings.SA]\n assert str(round(balance_exp[2], 2)) == results_annual[strings.MA]", "def check_item_order(context, container, position, item, attribute, value):\n index = dict(first=0, last=-1)[position]\n assert_equal(value, list(get_container(context, container)[f\"{item}s\"].values())[index][attribute])", "def _validate_positions():\n positions = set([field[\"position\"] for field in fields])\n if len(positions) != len(fields):\n raise IncorrectPredictionsTableOrder", "def assert_created_course(self, number_suffix=None):\r\n test_course_data = {}\r\n test_course_data.update(self.course_data)\r\n if number_suffix:\r\n test_course_data['number'] = '{0}_{1}'.format(test_course_data['number'], number_suffix)\r\n course_key = _get_course_id(test_course_data)\r\n _create_course(self, course_key, test_course_data)\r\n # Verify that the creator is now registered in the course.\r\n self.assertTrue(CourseEnrollment.is_enrolled(self.user, course_key))\r\n return test_course_data", "def verify_that_the_acl_was_not_set_to_rtacltest3(driver):\n assert wait_on_element(driver, 5, f'//div[contains(text(),\"rt-acl-test-1\")]//button', 'clickable')\n driver.find_element_by_xpath(f'//div[contains(text(),\"rt-acl-test-1\")]//button').click()\n time.sleep(3)\n assert wait_on_element(driver, 5, f'//tr[contains(.,\"rt-acl-test-3\")]//mat-icon[text()=\"more_vert\"]', 'clickable')\n driver.find_element_by_xpath(f'//tr[contains(.,\"rt-acl-test-3\")]//mat-icon[text()=\"more_vert\"]').click()\n time.sleep(1)\n assert wait_on_element(driver, 5, '//button[normalize-space(text())=\"View Permissions\"]')\n driver.find_element_by_xpath('//button[normalize-space(text())=\"View Permissions\"]').click()\n assert wait_on_element(driver, 5, '//div[contains(text(),\"User - games\")]') is False", "def test_proper(self):\n\n self.assertTrue(self.cs.isProper)\n self.assertFalse(self.cs.isDegenerate)", "def test_buyTicket_Valid_Paramaters():\n old_venue_balance = testVenue.wallet\n 
assert testUser3.buyTicket(testTicket3)\n assert testTicket3 in testUser3.inventory\n assert not testTicket3.for_sale\n assert testUser3.wallet == 950\n assert testVenue.wallet == old_venue_balance + testTicket3.list_price", "def test_dates_must_be_in_order_ended_before_started(make_one):\n w = make_one(\"TEST-1\")\n w.queued_at = datetime.datetime.now() - datetime.timedelta(days=14)\n w.started_at = datetime.datetime.now() - datetime.timedelta(days=10)\n w.ended_at = datetime.datetime.now() - datetime.timedelta(days=11)\n\n with pytest.raises(ValidationError):\n w.check_dates()", "def test_bom(self):\n self.validate_goal_for('game-20110901-055435-5a8e3666.html',\n u'Squiddy',\n 'BOM')", "def test_check_category_input_3(self):\n choices = [(1, 'choice 1'), (2, 'choice 2')]\n assert validation.check_category_input(2, choices) == True", "def test_order_warnings(self):\n\n o1 = OrderTest.create_order_1()\n\n # make sure there are no warnings now, one clean order\n self.assertEqual(len(o1.warnings()), 0)\n\n o2 = OrderTest.create_order_2(o1.inmate)\n\n # make sure there is a prior-order warning\n self.assertTrue(\"Patron received an order less than 3 months ago\" in o2.warnings())\n\n # make sure there's a prior-book warning\n self.assertTrue(True in [\"Patron already received\" in warning for warning in o2.warnings()])\n self.assertFalse(True in [\"blah blah blah this isn't a warning\" in warning for warning in o2.warnings()])\n\n # make sure we haven't triggered the same-book warning\n self.assertFalse(True in [\"Two books in this\" in warning for warning in o2.warnings()])\n\n # Add another book\n b3 = models.Book()\n b3.order = o2\n b3.title = \"dictionary\"\n b3.full_clean()\n b3.save()\n\n # ...and test if it triggers the same-book warning\n self.assertTrue(True in [\"Two books in this\" in warning for warning in o2.warnings()])", "def test_valid_general_collateral():\n is_valid, errors = validate(GENERAL_COLLATERAL, 'generalCollateral', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert is_valid" ]
[ "0.67318577", "0.6418377", "0.6131915", "0.5973272", "0.59159654", "0.5835521", "0.5678925", "0.56525654", "0.5622847", "0.55975693", "0.55640495", "0.55434185", "0.5528798", "0.55012167", "0.54883766", "0.54165864", "0.539526", "0.53826475", "0.5382484", "0.5381499", "0.5373344", "0.5369267", "0.53634447", "0.53475976", "0.53415257", "0.5331142", "0.53071415", "0.5304982", "0.52964115", "0.52892596", "0.528757", "0.5283849", "0.52786005", "0.5273312", "0.52682364", "0.52681416", "0.52654225", "0.5254218", "0.5246269", "0.5228364", "0.522566", "0.52162325", "0.52015185", "0.51986295", "0.5192964", "0.51903456", "0.51850194", "0.51780653", "0.5176057", "0.51694614", "0.5159642", "0.5157856", "0.5157655", "0.5145449", "0.5132209", "0.5130731", "0.5128544", "0.512539", "0.51246595", "0.51242906", "0.51204294", "0.5119218", "0.51187223", "0.5112361", "0.5111356", "0.51104945", "0.51104623", "0.5107015", "0.5097157", "0.50949216", "0.5094176", "0.5093978", "0.509298", "0.50906706", "0.508699", "0.50768596", "0.5074144", "0.5068544", "0.50680876", "0.5063735", "0.5061489", "0.50594467", "0.50592154", "0.50575936", "0.5056615", "0.50565016", "0.50558513", "0.5051773", "0.5050219", "0.50486666", "0.5045339", "0.5044879", "0.50408393", "0.5039757", "0.5039538", "0.5038809", "0.50377804", "0.50336677", "0.5031801", "0.5026059" ]
0.51499826
53
Find a text label for an axis describing a provided CSV column.
def get_label(column):
    for key, label in column_to_label.items():
        if key in column:
            return label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _label(self, column):\n # XXX\n return column", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return 'Column %d' % (self.index + 1)", "def GetColLabelValue(self, col):\n label = self.column_label(self.colsel[col])\n \n labels = []\n for mark in sorted(self.marks.keys()):\n if self.colsel[col] in self.marks[mark]:\n labels.append(mark)\n\n if labels:\n return label + \"\\n(\" + ','.join(labels) + ')'\n else:\n return label", "def column_label(self, rawcol):\n label = self.colLabels[rawcol]\n\n try:\n idx = self.dynamic_cols.index(rawcol)\n except ValueError:\n pass\n else:\n if self.dynamic_expressions[idx]:\n label = self.dynamic_expressions[idx]\n\n #custom labels overrides automic column labels\n custom_label = self.column_labels_custom.get(rawcol)\n if custom_label:\n label = custom_label\n\n return label", "def getLabelColumn(self):\n return self.getOrDefault(self.labelColumn)", "def getAxisLabel(self, dim=0):\n return self.__axis_labels__[dim]", "def label(self, row: Dict[str, str]) -> str:\n\n return row['Annotation']", "def get_target(csv, text = False):\n y_mapping = {'BL1':0, 'PA1':1, 'PA2':2, 'PA3':3,'PA4':4}\n \n idx = max( csv.find('PA'), csv.find('BL'))\n label = csv[idx:idx+3]\n if text:\n return label\n return y_mapping[label]", "def pandas_find_post_label_str(index, dataframe):\n return dataframe.at[index, 'label']", "def _get_data_labels(sheet, row, col):\n final_column = col\n header_row = _FIELDS['cell_value']['header']['row']\n # Abstract this sort of thing\n header = sheet.cell(row + header_row, final_column).value\n while any(header.startswith(label) for label\n in _FIELDS['isotherm tabular']['labels']):\n final_column += 1\n header = sheet.cell(row + header_row, final_column).value\n return [sheet.cell(row + header_row, i).value for i in\n range(col, final_column)]", "def column_index(self, column_label):\n return self.column_labels.index(column_label)", "def test_get_dim_label_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['label'] == 'Unemployment rate')", "def findLabel(row):\n if row[\"Max score\"] == row[\"TSG\"]:\n label = \"TSG\"\n elif row[\"Max score\"] == row[\"OG\"]:\n label = \"OG\"\n return label", "def _curve_labels(self, x_axis, sample, ylabel):\n return str(sample), x_axis.capitalize(), sample", "def label_extraction(self) -> None:\n self.df[\"label\"] = self.df[\"y\"]", "def _get_column(self, column_or_label):\n c = column_or_label\n if isinstance(c, collections.Hashable) and c in self.column_labels:\n return self[c]\n else:\n assert len(c) == self.num_rows, 'column length mismatch'\n return c", "def get_column_key(label: Tuple[str, ...], metrics: List[str]) -> Tuple[Any, ...]:\n parts: List[Any] = list(label)\n metric = parts[-1]\n parts[-1] = metrics.index(metric)\n return tuple(parts)", "def get_label_name(label):\n\tindex = np.argmax(label)\n\tlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\treturn labels[int(index)]", "def _get_axis_label(\n self,\n label: float | str | Mobject,\n axis: Mobject,\n edge: Sequence[float],\n direction: Sequence[float],\n buff: float = SMALL_BUFF,\n ) -> Mobject:\n\n label = self.x_axis._create_label_tex(label)\n label.next_to(axis.get_edge_center(edge), direction=direction, buff=buff)\n label.shift_onto_screen(buff=MED_SMALL_BUFF)\n return 
label", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def _get_labels(data, axis=0, always=True):\n # NOTE: Previously inferred 'axis 1' metadata of 1D variable using the\n # data values metadata but that is incorrect. The paradigm for 1D plots\n # is we have row coordinates representing x, data values representing y,\n # and column coordinates representing individual series.\n if axis not in (0, 1, 2):\n raise ValueError(f'Invalid axis {axis}.')\n labels = None\n _load_objects()\n if isinstance(data, ndarray):\n if not always:\n pass\n elif axis < data.ndim:\n labels = np.arange(data.shape[axis])\n else: # requesting 'axis 1' on a 1D array\n labels = np.array([0])\n # Xarray object\n # NOTE: Even if coords not present .coords[dim] auto-generates indices\n elif isinstance(data, DataArray):\n if axis < data.ndim:\n labels = data.coords[data.dims[axis]]\n elif not always:\n pass\n else:\n labels = np.array([0])\n # Pandas object\n elif isinstance(data, (DataFrame, Series, Index)):\n if axis == 0 and isinstance(data, (DataFrame, Series)):\n labels = data.index\n elif axis == 1 and isinstance(data, (DataFrame,)):\n labels = data.columns\n elif not always:\n pass\n else: # beyond dimensionality\n labels = np.array([0])\n # Everything else\n # NOTE: We ensure data is at least 1D in _to_arraylike so this covers everything\n else:\n raise ValueError(f'Unrecognized array type {type(data)}.')\n return labels", "def find_label(self, *args):\n return _ida_hexrays.cfunc_t_find_label(self, *args)", "def GetColumnText(self, column):\r\n\r\n return self._header_win.GetColumnText(column)", "def _get_col(self, idx):\n return self.text[self._fwf.column_slices[idx]]", "def get_label(path): # get ED ES label\n label_csv = pd.read_csv(path)\n label_list = []\n trans_list = list(np.array(label_csv).astype(np.int32))\n for i in trans_list:\n temp = []\n for j in i:\n if j >= 0:\n temp.append(j)\n label_list.append(temp)\n return label_list", "def annotate(row, ax, x='x', y='y', text='name', xytext=(7, -5), textcoords='offset points', **kwargs):\n # idx = row.name\n text = row[text] if text in row else str(text)\n x = row[x] if x in row else float(x)\n y = row[y] if y in row else float(y)\n ax.annotate(text, (row[x], row[y]), xytext=xytext, textcoords=textcoords, **kwargs)\n return row[text]", "def first_label(self):\r\n return self.labels.split(',')[0]", "def _get_label(obj):\n # NOTE: BarContainer and StemContainer are instances of tuple\n while not hasattr(obj, 'get_label') and isinstance(obj, tuple) and len(obj) > 1:\n obj = obj[-1]\n label = getattr(obj, 'get_label', lambda: None)()\n return label if label and label[:1] != '_' else None", "def get_label_coords(csv_file, name):\n labels = [] # np.zeros((50, 8), dtype=float)\n for row in csv_file:\n if row[0] == name:\n labels.append(row)\n else:\n pass\n\n return labels", "def label_axis(self, name, label):\n\n axis = self._find_axis(name)\n axis.axis_label = label", "def _get_labels(x_label, y_label, title, xlabel_str):\n if x_label is None:\n x_label = xlabel_str\n\n if y_label is None:\n y_label = \"Degree of membership\"\n\n if title is None:\n title = \"Degrees of membership of the samples to each cluster\"\n\n return x_label, y_label, title", "def get_label(self, index, key=\"Name\"):\n return eval(self.names[key][index])", "def 
_next_unlabelled_col(x):\n for i in range(self.n_cols):\n idx = (x + i) % self.n_cols\n x_current = self._x_positions[idx]\n if self._cols[x_current].label is None:\n return idx", "def _fact2label(self, ax_ndx, fact_ndx):\n if len(self._dims) > 1:\n key,value = self._factors[ax_ndx][fact_ndx]\n else:\n if fact_ndx == 1:\n return ''\n key,value = self._factors[ax_ndx][0]\n return '{} = {}'.format(key,value) if key != '' else ''", "def test_get_dim_label_with_index(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][2]\n dims_df = pyjstat.get_dim_label(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == '2003')\n self.assertTrue(dims_df.iloc[-1]['label'] == '2014')", "def fromLabel(name):\n return Data.labels.index(name)", "def column_name(self, label):\n self.output_label = label\n return self", "def get_data_labels(answer_mapping_df, column):\n labels = []\n for i in answer_mapping_df[column].columns.values:\n labels.append(answer_mapping_df.xs((column, i), level=('q_code', 'a_code'), axis=1).iloc[0,0])\n return labels", "def get_axis(self, axis):\n return self.index if axis == 0 else self.columns", "def get_label(urs):\n return assign_term(urs)[1]", "def axis_index(self, key):\n for i, name in enumerate(self.axis_labels):\n if name == key:\n return i\n raise ValueError(f'Axis not found: {key}')", "def get_axis_name(self, axis_id):\n if isinstance(axis_id, str):\n if axis_id in self.axes_names:\n return axis_id\n else:\n return None\n assert np.isreal(axis_id) and np.round(axis_id) == axis_id\n if axis_id >= 0 and axis_id < self.get_ndims():\n return self.axes_names[axis_id]\n else:\n return None", "def label(self, p_y_given_x):\n return np.argmax(p_y_given_x, axis=2).T", "def get_value_label(self, value):\n return self.label_config.get_index_label(value)", "def column(self, label):\n dis = []\n for x in self.rows:\n dis = dis + [x[self.column_labels.index(label)]]\n return dis\n # return self.rows[self.column_labels.index(label)]", "def axisinfo(unit, axis):\n if isinstance(unit, tuple):\n unit = unit[0]\n unit_obj = unit if isinstance(unit, Unit) else Unit(unit)\n name = unyt_arrayConverter._axisnames.get(axis, \"\")\n if unit_obj.is_dimensionless:\n label = name\n else:\n name += \" \"\n unit_str = unit_obj.latex_representation()\n if unyt_arrayConverter._labelstyle == \"[]\":\n label = name + \"$\\\\left[\" + unit_str + \"\\\\right]$\"\n elif unyt_arrayConverter._labelstyle == \"/\":\n axsym = \"$q_{\\\\rm\" + axis.axis_name + \"}$\"\n name = axsym if name == \" \" else name\n if \"/\" in unit_str:\n label = name + \"$\\\\;/\\\\;\\\\left(\" + unit_str + \"\\\\right)$\"\n else:\n label = name + \"$\\\\;/\\\\;\" + unit_str + \"$\"\n else:\n label = name + \"$\\\\left(\" + unit_str + \"\\\\right)$\"\n return AxisInfo(label=label.strip())", "def formatAxisLabel(self, label):\n # retrieve user preference\n symbol = bool(self.config('graph_labels_symbols', False))\n units = self.config('graph_labels_units', default='[]', astype=str)\n units = units.replace('unit', '').replace(' ', '')\n # format input\n if isinstance(label, str):\n if units != '[]': # that is default, no need to do anything\n expr = r'^(.* )\\[(.*)\\](.*)$' # [ ] as characters, not a set\n if label != '':\n f = refindall(expr, label)\n if isinstance(f, list) and len(f) == 1 and len(f[0]) == 3:\n if units in ['DIN', '/']:\n return f[0][0].strip(' ')+' / '+f[0][1]+''+f[0][2]\n elif units == '()':\n return f[0][0].strip(' ')+'('+f[0][1]+')'+f[0][2]\n elif isinstance(label, list):\n while 
len(label) < 3:\n label += ['']\n out = label[0]\n if symbol and len(label[1]) > 0:\n out += ' $' + label[1] + '$'\n if label[2] not in [None, ' ']:\n if units == '/':\n out += ' / ' + label[2]\n elif units == '()':\n out += ' (' + label[2] + ')'\n else:\n out += ' [' + label[2] + ']'\n return out.replace(' ', ' ')\n return label", "def label(tree):\n return tree[0]", "def get_label_name(self):\n command_type = self.get_type()\n if command_type == LABEL_COMMAND_TYPE:\n return self.command[1:-1] # ignores the () at the beginning and the end\n if command_type != EMPTY_COMMAND_TYPE: # increments the line number if it is not a blank line or a label\n self.__line_number += 1", "def _get_labels(self, ind):\n pass", "def getLabel(y, mode='default'):\n if mode=='default':\n return y\n elif mode=='r':\n y = y[:, :2]\n return y\n elif mode==\"eps_sm\":\n y = y[:, 2:6]\n return y\n elif mode=='eps':\n y = y[:, 2:]\n return y\n else:\n raise ValueError(\"Unknown mode {} found\".format(mode))", "def get_computed_label(self, element):\n pass", "def label(efo):\n url = 'https://www.ebi.ac.uk/ols/api/ontologies/efo/terms?iri=http://www.ebi.ac.uk/efo/'+efo\n try:\n response = requests.get(url).json()\n except:\n return \"\"\n if 'error' in response.keys():\n return \"\"\n return response['_embedded']['terms'][0]['label']", "def get_item_text(self, idx, col, absolute=False):\n\n return util.to_ustr(self.itemDataMap[self.itemIndexMap[idx] if not absolute else idx][self.get_real_col(col)])", "def GetColumnText(self, column):\r\n\r\n if column < 0 or column >= self.GetColumnCount():\r\n raise Exception(\"Invalid column\")\r\n \r\n return self._columns[column].GetText()", "def column_labels(self):\n return tuple(self._columns.keys())", "def OnSetColumnLabel(self, event):\n rawcol = self.Table.colsel[self.actCol]\n\n dialog = wx.TextEntryDialog(self,\n message = \"Enter column label\",\n caption = \"Column label\",\n defaultValue = self.Table.column_labels_custom.get(rawcol, ''),\n )\n if (dialog.ShowModal() == wx.ID_OK):\n self.Table.column_labels_custom[rawcol] = dialog.Value\n self.Refresh()\n self.Table.update_observers()", "def get_index_name(self, axis=0):\n return self.get_axis(axis).name", "def get_node_name(row, col):\n return \"({},{})\".format(row, col)", "def labels_x(x_unit, latex = True, verbose = 0): \n \n if verbose > 1:\n print(\"SpectraTools.Resources.UnitConversion.labels_x()\") \n \n if x_unit in nm_labels:\n return \"Wavelength (nm)\"\n elif x_unit in um_labels:\n if latex:\n return r\"Wavelength ($\\mu$m)\"\n else:\n return \"Wavelength (micron)\"\n elif x_unit in cm_labels:\n if latex:\n return r\"Energy (cm$^{-1}$)\"\n else:\n return \"Energy (cm-1)\"\n elif x_unit in ev_labels:\n return \"Energy (eV)\" \n else:\n return x_unit", "def Label(self) -> str:", "def encode_data(column: str, data):\n return label.fit_transform(data[column])", "def getAxisSuffix(d):\t\n\tdSuffix = { 'xyz':'3' ,\n\t 'sum':'s' ,\n\t 'xxx':'x' ,\n\t 'yyy':'y' ,\n\t 'zzz':'z' }\n\tsuffix = dSuffix.get(d['whichAx'],None)\n\tif not suffix:\n\t\traise '\\nunaccounted for whichAx suffix lookup with %s\\n' % d['whichAx']\n\telse:\n\t\treturn suffix", "def _axis_labels(self, data_name: str) -> Tuple[str, str]:\n\n # Single activity attributes (column name must be present in summary dataframe)\n if data_name == 'distance':\n if self.config.distance_unit == 'km':\n return 'distance_2d_km', 'Distance (km)'\n elif self.config.distance_unit == 'mile':\n return 'distance_2d_mile', 'Distance (miles)'\n elif data_name == 
'duration':\n return 'duration', 'Duration (minutes)'\n\n # These can be used for either single activities (summary dataframe) or aggregates (time series dataframe)\n elif data_name == 'mean_speed':\n if self.config.distance_unit == 'km':\n return 'mean_kmph', 'Average speed (km/hour)'\n elif self.config.distance_unit == 'mile':\n return 'mean_mph', 'Average speed (miles/hour)'\n elif data_name == 'mean_hr':\n return 'mean_hr', 'Average heart rate (beats/minute)'\n\n # Aggregate attributes (column name must be present in time series dataframe)\n elif data_name == 'total_distance':\n if self.config.distance_unit == 'km':\n return 'total_distance_2d_km', 'Total distance (km)'\n elif self.config.distance_unit == 'mile':\n return 'total_distance_2d_mile', 'Total distance (miles)'\n elif data_name == 'total_duration':\n return 'total_duration', 'Total duration (minutes)'\n elif data_name == 'activity_count':\n return 'activity_count', 'Number of activities'\n\n else:\n raise ValueError(f'Bad value for `data_name`: \"{data_name}\".')", "def render_axis_labels(self, axes=None):\n raise NotImplementedError()", "def set_axis_label(\n self,\n axis: Union[int, Sequence[int]],\n label: Union[str, Sequence[str]],\n ):\n if isinstance(axis, Integral):\n axis = assert_axis_in_bounds(axis, self.ndim)\n if self.axis_labels[axis] != str(label):\n full_axis_labels = list(self.axis_labels)\n full_axis_labels[axis] = str(label)\n self.axis_labels = full_axis_labels\n self.last_used = axis\n else:\n full_axis_labels = list(self.axis_labels)\n # cast label to list for list comparison below\n label = list(label) # type: ignore\n axis = tuple(axis) # type: ignore\n if len(axis) != len(label):\n raise ValueError(\n trans._(\"axis and label sequences must have equal length\")\n )\n if label != full_axis_labels:\n for ax, val in zip(axis, label):\n ax = assert_axis_in_bounds(int(ax), self.ndim)\n full_axis_labels[ax] = val\n self.axis_labels = full_axis_labels", "def act_on_column_name(self, *, arg, value):\n assert isinstance(arg, (pl.DataFrame, type(None)))\n assert isinstance(value, str)\n return PolarsTerm(polars_term=pl.col(value), is_column=True)", "def labels(self):\n \n return self.column_labels", "def label_from_example(example):\n val = example.features.feature['label'].int64_list.value\n if val:\n return int(val[0])\n else:\n return None", "def coordLabel(self, loc, voxel=False):\n\n if not voxel:\n loc = affine.transform([loc], self.worldToVoxMat)[0]\n loc = [int(v) for v in loc.round()]\n\n if loc[0] < 0 or \\\n loc[1] < 0 or \\\n loc[2] < 0 or \\\n loc[0] >= self.shape[0] or \\\n loc[1] >= self.shape[1] or \\\n loc[2] >= self.shape[2]:\n return None\n\n return self[loc[0], loc[1], loc[2]]", "def get_labels_by_attr_name(self, attr_name):\n # get index of attribute\n try:\n attr_index = self.attr_names.index(attr_name)\n except:\n print('unidentified attribute name...!!!')\n\n return self.attr_data[:, attr_index]", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def setLabelColumn(self, value):\n return self._set(labelColumn=value)", "def get_label(self, which_label: str, extra_label: str) -> str:\n result = self.row_dict.get(extra_label)\n if result:\n # We will use this label\n pass\n elif which_label == 'first_label':\n header = self.row_header\n first_label = next((i for i in header if i.startswith('label')),\n None)\n if 
first_label is None:\n raise LabelNotFoundError()\n result = self.row_dict[first_label]\n elif which_label in self.row_dict:\n result = self.row_dict[which_label]\n else:\n raise LabelNotFoundError()\n str_result = str(result)\n return str_result", "def get_index(observable_nodes, label):\n for k in observable_nodes:\n if label in observable_nodes[k]:\n return observable_nodes[k][label]['category']", "def get_label(self, ):\n return self.attrs.get(self.AttributeNames.LABEL, None)", "def parse_label_column(label_string_tensor):\n # Build a Hash Table inside the graph\n table = tf.contrib.lookup.index_table_from_tensor(tf.constant(LABELS))\n\n # Use the hash table to convert string labels to ints and one-hot encode\n return table.lookup(label_string_tensor)", "def read_img_names_labels_csv(csv_path):\n df = pd.read_csv(csv_path)\n\n try:\n image_names_list = list(df['ImageName'])\n y = list(df['Label'])\n except KeyError:\n raise CsvColumnNameException(\" The column names of image-name_label csv must be 'ImageName' and 'Label' \")\n\n return image_names_list, y", "def getxLabel(self) -> int:\n if not self.debug:\n # TODO: Verificar\n self.myFieldFox.write(\"DISPlay:TITLe:DATA?\")\n ret = self.myFieldFox.read()\n else:\n ret = 'xLabel'\n return ret", "def find_label(self, *args):\n return _ida_hexrays.cfuncptr_t_find_label(self, *args)", "def label(self):\n # type: () -> str\n labels = self.__class__.__labels__\n return force_str(labels.get(self.value, self.name))", "def tex_axis_label(self, label, smalllabel=False):\n if isinstance(label, list):\n label = label[0]\n if not isinstance(label, str):\n raise ValueError(\"Label must be a string. Got %s of \"\n \"type %s\"%(label, type(label)))\n label = label.lower()\n pretty_labels = {}\n pretty_labels[\"atm_muon_scale\"] = r\"Muon Background Scale\"\n pretty_labels[\"nue_numu_ratio\"] = r\"$\\nu_e/\\nu_{\\mu}$ Ratio\"\n pretty_labels[\"nu_nc_norm\"] = r\"$\\nu$ NC Scale\"\n pretty_labels[\"nu_nubar_ratio\"] = r\"$\\nu/\\bar{\\nu}$ Ratio\"\n pretty_labels[\"barr_uphor_ratio\"] = r\"Barr Up/Horizontal Ratio\"\n pretty_labels[\"barr_nu_nubar_ratio\"] = r\"Barr $\\nu/\\bar{\\nu}$ Ratio\"\n pretty_labels[\"barr_uphor\"] = r\"Barr Up/Horizontal Ratio\"\n pretty_labels[\"barr_nu_nubar\"] = r\"Barr $\\nu/\\bar{\\nu}$ Ratio\"\n pretty_labels[\"delta_index\"] = r\"Atmospheric Index Change\"\n pretty_labels[\"theta13\"] = r\"$\\theta_{13}$\"\n pretty_labels[\"theta23\"] = r\"$\\theta_{23}$\"\n pretty_labels[\"deltacp\"] = r\"$\\delta_{\\mathrm{CP}}$\"\n pretty_labels[\"gamma\"] = r\"$\\Gamma$\"\n pretty_labels[\"sin2theta23\"] = r\"$\\sin^2\\theta_{23}$\"\n pretty_labels[\"deltam31\"] = r\"$\\Delta m^2_{31}$\"\n pretty_labels[\"deltam32\"] = r\"$\\Delta m^2_{32}$\"\n pretty_labels[\"deltam3l\"] = r\"$\\Delta m^2_{3l}$\"\n pretty_labels[\"aeff_scale\"] = r\"$A_{\\mathrm{eff}}$ Scale\"\n pretty_labels[\"energy_scale\"] = r\"Energy Scale\"\n pretty_labels[\"genie_ma_qe\"] = r\"GENIE $M_{A}^{QE}$\"\n pretty_labels[\"genie_ma_res\"] = r\"GENIE $M_{A}^{Res}$\"\n pretty_labels[\"dom_eff\"] = r\"DOM Efficiency\"\n pretty_labels[\"hole_ice\"] = r\"Hole Ice\"\n pretty_labels[\"hole_ice_fwd\"] = r\"Hole Ice Forward\"\n pretty_labels[\"degree\"] = r\"$^\\circ$\"\n pretty_labels[\"radians\"] = r\"rads\"\n pretty_labels[\"radian\"] = r\"rads\"\n pretty_labels[\"electron_volt ** 2\"] = r\"$\\mathrm{eV}^2$\"\n pretty_labels[\"electron_volt\"] = r\"$\\mathrm{eV}^2$\"\n pretty_labels[\"gigaelectron_volt\"] = r\"$\\mathrm{GeV}$\"\n pretty_labels[\"llh\"] = r\"Likelihood\"\n 
pretty_labels[\"conv_llh\"] = r\"Convoluted Likelihood\"\n pretty_labels[\"chi2\"] = r\"$\\chi^2$\"\n pretty_labels[\"mod_chi2\"] = r\"Modified $\\chi^2$\"\n pretty_labels[\"delta_llh\"] = r\"$\\Delta$ Likelihood\"\n pretty_labels[\"delta_conv_llh\"] = r\"$\\Delta$ Convoluted Likelihood\"\n pretty_labels[\"delta_chi2\"] = r\"$\\Delta\\chi^2$\"\n pretty_labels[\"delta_mod_chi2\"] = r\"$\\Delta$ $\\chi^2_{\\mathrm{mod}}$\"\n if smalllabel:\n pretty_labels[\"no\"] = r\"NO\"\n pretty_labels[\"io\"] = r\"IO\"\n else:\n pretty_labels[\"no\"] = r\"Normal Ordering\"\n pretty_labels[\"io\"] = r\"Inverted Ordering\"\n pretty_labels[\"nomsw\"] = r\"Normal Ordering, Matter Oscillations\"\n pretty_labels[\"iomsw\"] = r\"Inverted Ordering, Matter Oscillations\"\n pretty_labels[\"novacuum\"] = r\"Normal Ordering, Vacuum Oscillations\"\n pretty_labels[\"iovacuum\"] = r\"Inverted Ordering, Vacuum Oscillations\"\n pretty_labels[\"msw\"] = r\"Matter Oscillations\"\n pretty_labels[\"vacuum\"] = r\"Vacuum Oscillations\"\n pretty_labels[\"no,llr\"] = r\"LLR Method\"\n pretty_labels[\"no,llr,nufitpriors\"] = r\"LLR Method, Nu-Fit Priors\"\n pretty_labels[\"io,llr\"] = r\"llr Method\"\n pretty_labels[\"io,llr,nufitpriors\"] = r\"LLR Method, Nu-Fit Priors\"\n pretty_labels[\"nue\"] = r\"$\\nu_e$\"\n pretty_labels[\"nuebar\"] = r\"$\\bar{\\nu}_e$\"\n pretty_labels[\"numu\"] = r\"$\\nu_{\\mu}$\"\n pretty_labels[\"numubar\"] = r\"$\\bar{\\nu}_{\\mu}$\"\n pretty_labels[\"second\"] = r\"s\"\n pretty_labels[\"seconds\"] = r\"s\"\n pretty_labels[\"atm_delta_index\"] = r\"Atmospheric Index Change\"\n pretty_labels[\"pve\"] = r\"Positive\"\n pretty_labels[\"nve\"] = r\"Negative\"\n pretty_labels[\"fitwrong\"] = r\"Sensitivity Stability\"\n pretty_labels[\"fixwrong\"] = r\"Fitting Relevance\"\n pretty_labels[\"nminusone\"] = r\"Hidden Potential\"\n pretty_labels[\"minimiser_times\"] = r\"Minimiser Time (seconds)\"\n pretty_labels[\"minimiser_iterations\"] = r\"Minimiser Iterations\"\n pretty_labels[\"minimiser_funcevals\"] = r\"Minimiser Function Evaluations\"\n pretty_labels[\"minimiser_status\"] = r\"Minimiser Status\"\n pretty_labels[\"correlation_coefficients\"] = r\"Correlation Coefficients\"\n pretty_labels[\"true no, llr\"] = r\"True Normal Ordering, LLR\"\n pretty_labels[\"true io, llr\"] = r\"True Inverted Ordering, LLR\"\n pretty_labels[\"e_res_scale\"] = r\"Energy Resolution Scale\"\n pretty_labels[\"cz_res_scale\"] = r\"$\\cos\\theta_Z$ Resolution Scale\"\n pretty_labels[\"livetime\"] = r\"Livetime\"\n pretty_labels[\"julian_year\"] = r\"Years\"\n if label not in pretty_labels.keys():\n logging.warning(\"I have no nice label for %s. 
Returning as is.\"%label)\n return label\n return pretty_labels[label]", "def setLabelColumn(self, v):\n return self._set(labelColumn=v)", "def labels_y(y_unit, latex = True, verbose = 0): \n\n if verbose > 1:\n print(\"SpectraTools.Resources.UnitConversion.labels_y()\") \n \n if y_unit in absorption_labels:\n return \"Absorption (OD)\"\n elif y_unit in milli_absorption_labels:\n return \"Absorption (mOD)\" \n elif y_unit in transmission_1_labels:\n return \"Transmission\"\n elif y_unit in transmission_pct_labels:\n if latex:\n return r\"Transmission (\\%)\"\n else:\n return \"Transmission (%)\"\n else:\n return y_unit", "def label(self) -> str:\n return self[\"label\"]", "def label_to_class_name(label):\n try:\n genre_label = pd.read_csv(path.join(DATA_PATH, 'genre_labels.csv'))\n return genre_label[genre_label['label'] == int(label)]['genre'].values[\n 0]\n except IOError:\n return label", "def label_study(row):\n study_label = str(row['subjnum'])[0]\n return study_label", "def _get_label(self):\n return self.label", "def make_label(self, label, units):\n nice_label = self.tex_axis_label(label)\n if not (units == 'dimensionless') and \\\n (units is not None) and (not units == []):\n nice_label += ' (%s)'%self.tex_axis_label(units)\n return nice_label", "def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels", "def getMetaLabel(self, idx):\n return self.label_dict[idx].decode(\"utf-8\")", "def get_label(self, name):\n label_list = self.wls_board.get_labels()\n for label in label_list:\n if name in label.name: \n return label", "def axis_name(self):\n return self._axis_name", "def test_get_dim_index_with_label(self):\n\n dim = self.oecd_datasets['oecd']['dimension']['id'][0]\n dims_df = pyjstat.get_dim_index(self.oecd_datasets['oecd'], dim)\n self.assertTrue(dims_df.iloc[0]['id'] == 'UNR')\n self.assertTrue(dims_df.iloc[-1]['index'] == 0)", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def set_axis_label(self, label, axis):\n if axis == 'x':\n self.axplot.set_xlabel(label)\n elif axis == 'y':\n self.axplot.set_ylabel(label)\n else:\n errmsg = 'Valid axis names are x and y.'\n raise ValueError(errmsg)" ]
[ "0.70492536", "0.65161985", "0.6515418", "0.63260293", "0.60894614", "0.6023913", "0.59223855", "0.5880934", "0.5865998", "0.57208353", "0.5644971", "0.56144625", "0.55985284", "0.55886084", "0.5540931", "0.5496037", "0.5494928", "0.5494829", "0.5493561", "0.5486655", "0.54548746", "0.5435441", "0.5435145", "0.53888637", "0.53557515", "0.53111815", "0.5297283", "0.52613366", "0.52561975", "0.5248511", "0.5245542", "0.52361315", "0.52269554", "0.5207962", "0.52039176", "0.51996404", "0.5192592", "0.5184705", "0.5179965", "0.5178345", "0.5170379", "0.5168883", "0.5129639", "0.51204437", "0.5107331", "0.50795585", "0.5073389", "0.5071071", "0.5051446", "0.50512695", "0.50386065", "0.50384295", "0.5036727", "0.50310814", "0.5026927", "0.50245035", "0.50244266", "0.5016515", "0.5009676", "0.50088656", "0.5001222", "0.4997084", "0.49967048", "0.49939737", "0.49901366", "0.49760878", "0.49736956", "0.49736297", "0.49718276", "0.49712053", "0.49611", "0.49573013", "0.49573013", "0.49573013", "0.49573013", "0.4950847", "0.49415556", "0.49350122", "0.49329463", "0.49181312", "0.49030164", "0.48990667", "0.48977038", "0.48971957", "0.4894217", "0.48907572", "0.48826885", "0.4876915", "0.4876161", "0.4875115", "0.48727176", "0.48715588", "0.48702148", "0.48700324", "0.48656157", "0.4855858", "0.4839264", "0.4839264", "0.4839264", "0.48341867" ]
0.7068317
0
Filter out all pandas.DataFrame without required columns
def dfs_filter(dfs, df_names, column_list):
    dfs_out = []
    df_names_out = []
    # loop over all (pandas.DataFrame, str) pairs
    for df, df_name in zip(dfs, df_names):
        has_all = True
        for column in column_list:
            if column not in df.columns:
                has_all = False
                break
        # if DataFrame does not have all specified columns just skip
        # the DataFrame
        if not has_all:
            continue
        # append the DataFrame and its name to the outputs
        dfs_out.append(df)
        df_names_out.append(df_name)
    return dfs_out, df_names_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df", "def _filter_df(adjmat, df, verbose=3):\n remcols = df.columns[~np.isin(df.columns.values, adjmat.columns.values)].values\n if len(remcols)>0:\n if verbose>=3: print('[bnlearn] >Removing columns from dataframe to make consistent with DAG [%s]' %(remcols))\n df.drop(labels=remcols, axis=1, inplace=True)\n return df", "def drop_extra_columns(self):\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def filter_data(df, needed_columns, not_null_columns=[]):\n\n if all(x in needed_columns for x in not_null_columns):\n\n df = get_needed_columns(df, needed_columns)\n #Use the pandas method bc the other method was erroring on boolean index.\n #IM - 9/23/22\n df = df.dropna(subset=not_null_columns)#remove_rows_with_null_values(df, not_null_columns)\n\n return df\n else:\n print(\"Developer error, not null columns should be a subset of needed columns\")\n return df", "def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df", "def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols", "def drop_one_elem_columns(self, df):\n df_ = df.copy()\n\n # Incldue columns in dataframe\n include_idx = []\n for i in df_.columns:\n len_unique = df_[i].dropna().unique().size\n if len_unique > 1:\n include_idx.append(i)\n\n df_ = df_[include_idx]\n return df_", "def _get_target_only_columns(self, df: DataFrame) -> DataFrame:\n target_table_columns = self.target_table.get_columns()\n \n # if mutation of incoming df is desired, make a deepcopy here\n filtered_df = df\n for column in filtered_df.columns:\n if column not in target_table_columns:\n print(f'dropping unused column \"{column}\"')\n filtered_df = filtered_df.drop(column)\n \n return filtered_df", "def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df", "def drop_uninformative_columns(df: pd.DataFrame) -> pd.DataFrame:\n for column, value in uninformative_columns(df):\n logger.debug(\n \"Dropping column %r from DataFrame (every value %s %r)\",\n column,\n \"is\" if isinstance(value, float) and np.isnan(value) else \"=\",\n value,\n )\n df = df.drop(column, axis=\"columns\")\n return df", "def clean_columns(df: pd.DataFrame, filled_rate: float = 0.6) -> pd.DataFrame:\n\n print(f\"Initial shape of the dataframe: {str(df.shape) : >17}\")\n # keep columns that are filled more than the filled rate, default = 60%\n df = df.loc[:, (df.isnull().mean() < (1 - filled_rate))]\n 
print(f\"Shape after removing null columns: {str(df.shape) : >14}\")\n\n return df", "def drop_columns(df, exclusion):\n for c in df.columns.values:\n if c not in exclusion:\n df.drop(c, axis=1, inplace=True)\n return df", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def strip_static_cols(df):\n for col in df.columns:\n if len((df[col]).unique()) == 1:\n df.drop(columns=[col], inplace=True)\n return df", "def remove_blank_rows(df):\n clean_df = df.filter(df.columns[1:], axis=1).dropna(how='all', axis=0)\n idx_to_keep = clean_df.index\n clean_df = df.filter(idx_to_keep, axis=0)\n return clean_df", "def df_cleaner(df):\n return df.dropna()", "def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def strip_columns(df: DataFrame) -> DataFrame:\r\n return df.apply(lambda x: x.str.strip() if x.dtype == 'object' else x)", "def drop_na_columns(df: pd.DataFrame, inplace=False) -> pd.DataFrame:\n return df.dropna(axis=\"columns\", how=\"all\", inplace=inplace)", "def remove_bad_columns(df):\n columns = []\n if 'job_runner_name' in df.columns:\n columns.append('job_runner_name')\n\n if 'handler' in df.columns:\n columns.append('handler')\n\n if 'destination_id' in df.columns:\n columns.append('destination_id')\n\n if 'input_file' in df.columns:\n columns.append('input_file')\n\n for column in columns:\n del df[column]\n\n return df", "def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)", "def df_cleaner(df):\n df = df.dropna()\n return df", "def filter(self):\n self.data = self.data.loc[~self.data.isnull().any(1),:]", "def clean(df):", "def drop_unnecessary_columns(data_frame, columns):\n missing_columns = [c for c in columns if c not in data_frame.columns]\n if missing_columns:\n raise ValueError(\"data frame is missing columns specified in config: \" + missing_columns)\n\n unnecessary_cols = [c for c in data_frame.columns if c not in columns]\n data_frame.drop(columns=unnecessary_cols, inplace=True)\n return data_frame", "def select_feats(df):\n cols = list(df)\n for col in cols:\n if col not in config[\"feats\"] and col != \"label\":\n df = df.drop(columns=col)\n return df", "def exclude_empty_feats(self):\n for dataset in self:\n dataset.dropna(axis=1, how=\"all\", inplace=True)", "def filterfeatures(df):\n\tfilter_arr = []\n\tfor f in df.columns:\n\t\tif not '.l' in f and not '.r' in f and not '.std' in f and f != 'weight' and f != 'class':\n\t\t\t# filter_arr.append(f.rstrip('.mean'))\n\t\t\tfilter_arr.append(f)\n\treturn filter_arr", "def delete_columns(houses:pd.DataFrame)-> pd.DataFrame:\n drop_columns= ['NEXT OPEN HOUSE START TIME', 'NEXT OPEN HOUSE END TIME', \n 'URL (SEE http://www.redfin.com/buy-a-home/comparative-market-analysis FOR INFO ON PRICING)',\n 'MLS#', 'FAVORITE', 'INTERESTED', 'LATITUDE', 'LONGITUDE',\n SOURCE, SALE_TYPE, CITY, STATE]\n houses= 
houses[houses[STATUS].isin(['Sold'])]\n houses= houses[houses[CITY].isin(['Irvine'])]\n return houses.drop(drop_columns, axis= 1)", "def removeNonQuant(df, cols):\r\n df = df[~(df[cols].isnull().all(1))]\r\n return df", "def get_columns_not_all_nulls(X, columns_to_check='all', rows_to_scan='all'):\n columns_to_check = get_list_of_columns_to_check(columns_to_check, X.columns)\n remove_columns = get_columns_with_all_nulls(X, columns_to_check, rows_to_scan)\n return list(set(columns_to_check)-set(remove_columns))", "def drop_unnecessary_columns(df):\n df = df.drop([\n 'id',\n 'imdb_id',\n 'poster_path',\n 'video',\n 'status',\n 'weighted_rating', # Only average_rating was used for this project\n 'original_title',\n 'crew', # Used in production_score\n 'producers', # Used in production_score\n 'executive_producers', # Used in production_score\n 'cast', # Used in production_score\n 'director', # Used in production_score\n 'production_companies', # Used in production_score\n 'production_countries', # Binarized\n 'genres', # Binarized\n 'original_language', # Binarized\n 'adult', # No adult movies in the dataset, so no variance between movies\n 'release_date', # Not being considered for this project\n 'overview',\n 'title',\n 'tagline',\n 'vote_average', # Ratings have been binned\n 'popularity', # Only considering average_rating\n 'vote_count', # We are making a predictor, so it makes no sense to use vote counts as input\n 'revenue', # We are making a predictor, so it makes no sense to use revenue as input\n 'keywords', # Not considering keywords for this project\n 'revenue_divide_budget', # We are making a predictor, so it makes no sense to use revenue/budget as input\n ], 1)\n return df", "def trim_dataframe(self) -> pd.DataFrame:\n self.remove_below_lower_length_limit()\n self.trim_to_upper_length_limit()\n return self.data", "def non_null_df(df, required_cols):\n return df.where(reduce(lambda x, y: x & y, (col(x).isNotNull() for x in required_cols)))", "def get_all_contests(data_frame) -> list:\n return [contest for contest in data_frame.columns if contest != 'Ballot Style']", "def remove_columns_missing_values(df, min_threshold):\n for col in df.columns:\n rate = sum(df[col].notnull())/float(len(df)) * 100\n if rate <= min_threshold:\n df = df.drop(col,1)\n return df", "def remove_columns(df, threshold, log=False):\n if log: sectionTimer = Timer(log=f\"removing columns with more than {threshold * 100}% of nans\")\n \n # removes columns with many nans\n non_nan_values = int(df.shape[0] * (1 - threshold))\n df_clean = df.dropna(thresh=non_nan_values, axis=1)\n dropped_cols = list(set(df.columns) - set(df_clean.columns))\n\n if log: sectionTimer.end_timer(log=f\"removed {len(set(df.columns)) - df_clean.shape[1]} columns\")\n return df_clean, dropped_cols", "def filter_df(df):\n filtered_df = df[df['rtt_author_ids'].notnull()]\n filtered_df = filtered_df[filtered_df['user_country'] == 'FR']\n filtered_df = filtered_df[filtered_df['retweet_count'] > 1]\n filtered_df = filtered_df[filtered_df['favourites_count'] > 1000]\n return filtered_df", "def remove_urequired_columns(self, unrequired_columns):\n self.df = self.df.drop(columns=unrequired_columns)", "def keep_columns(self, colnames):\n colnames = self.data.columns.intersection(colnames)\n return self.__class__(self.data.loc[:, colnames], self.meta.copy())", "def filter_by_count(df, min_columns, min_count):\n num_columns = len(df.columns)\n df = df.ix[df[df > min_count].isnull().sum(axis=1) < (num_columns - min_columns)]\n return df", "def 
preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df", "def reduce_data_to_necessary_columns(filtered_df):\n hist_df = filtered_df[\n [\n \"UniqueName\",\n \"Joins\",\n \"Projection_Attributes\",\n \"Selection_Attributes\",\n \"GroupBy\",\n \"OrderBy\",\n \"Strings\",\n \"Tables\",\n ]\n ].set_index(\"UniqueName\")\n return hist_df", "def df_drop_cols(df, col_keep_ls, inplace=True):\n import pandas\n \n vdf_mem_map = isinstance(df, vaex.hdf5.dataset.Hdf5MemoryMapped)\n vdf_df_arr = isinstance(df, vaex.dataframe.DataFrameArrays)\n\n if (vdf_mem_map) or (vdf_df_arr):\n all_col_names = set(df.column_names)\n elif isinstance(df, pandas.core.frame.DataFrame):\n all_col_names = set(df.columns)\n \n col_keep_set = set(col_keep_ls)\n col_drop_set = all_col_names - col_keep_set\n \n for col in col_drop_set:\n if (vdf_mem_map) or (vdf_df_arr):\n df.drop(col, inplace=inplace)\n elif isinstance(df, pandas.core.frame.DataFrame):\n df.drop(col, axis=1, inplace=inplace)", "def missing_columns(self):\r\n _missing_columns = set(self.reqd_columns).difference(set(self.all_columns))\r\n return list(_missing_columns)", "def filter_by(df, constraints):\n indexer = [constraints[name] if name in constraints else slice(None)\n for name in df.index.names]\n return df.loc[tuple(indexer)] if len(df.shape) == 1 else df.loc[tuple(indexer),]", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def get_cols_drop():", "def _unselected_columns(self, X):\n X_columns = list(X.columns)\n return [column for column in X_columns if\n column not in self._selected_columns]", "def remove(dataframe, limit=250):\n logfile = open('logfile_removecolumns.txt', 'w') # Create a logfile\n logfile.write('=====> Time: %s <=====\\n' % time.asctime(time.localtime()))\n logfile.write('=====> Log from file %s.py <===== \\n\\n' % __name__)\n\n columns_overview = dataframe.columns.summary() # Create an overview of the dataframe\n cols_list = dataframe.columns.tolist()\n cols_to_be_deleted = list()\n logfile.write('Overview of the dataframe: \\n%s' % columns_overview)\n\n for stock in range(len(cols_list)): # Walk through all stocks\n if dataframe[cols_list[stock]].isnull().sum() > limit: # Check No. of null values in a column\n cols_to_be_deleted.append(cols_list[stock])\n \n logfile.write('\\nNo. of Columns with more that %d missing values: %s\\n'\n % (limit, len(cols_to_be_deleted)))\n logfile.write('Deleted columns:\\n')\n for col in cols_to_be_deleted:\n logfile.write('%s \\n' % str(col))\n logfile.close()\n \n # Return updated dataframe or list of columns. 
See test code below\n dataframe_updated = dataframe[dataframe.columns.drop(cols_to_be_deleted)]\n return dataframe_updated", "def clean_na_rows_any(df: pd.DataFrame, col1: str, col2: str) -> pd.DataFrame:\n\n # create mask to filter df with rows that have null values\n # in any of the columns indicated in col1 and col2\n slice1 = df.columns.get_loc(col1)\n slice2 = df.columns.get_loc(col2)\n mask = [all(df.iloc[row, slice1 : slice2 + 1].notna()) for row in range(len(df))]\n # filter df\n df = df.iloc[mask]\n df.reset_index(drop=True, inplace=True)\n print(\n f\"Shape after removing null rows between '{df.columns[slice1]}'\\n and '{df.columns[slice2]}'':\\n\"\n )\n print(df.shape, \"\\n\")\n\n return df", "def prepare_output_df(df: DataFrame, kind: str) -> DataFrame:\r\n columns = get_export_columns(kind)\r\n to_drop = list(filter(lambda x: x not in columns.keys(), df.columns.to_list())) # For any columns not in the get_export_columns()\r\n df = df.drop(columns=to_drop) # mapping, drop them from the DataFrame.\r\n df = df.rename(columns=columns)\r\n return df", "def test_drop_zero_variance_on_subset_columns(data):\n step = DropZVColumnsStep(['name', 'released'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' in bdf.columns", "def drop_const_columns(df, drop_columns=True, print_columns=True):\n\n\n\n # 1. report\n\n SingleValueCols = []\n for col in df.columns:\n unique_count=df[col].nunique()\n if unique_count < 2:\n SingleValueCols.append(col)\n if print_columns:\n print(col, unique_count)\n\n print\n print('Constant columns count: %s' % len(SingleValueCols))\n\n # 2. dropping\n if drop_columns:\n print('%s columns total' % df.shape[1])\n df = df.drop(SingleValueCols, 1)\n print('%s columns left' % df.shape[1])\n\n return df", "def exclude_duplicates(self):\n from axs.catalog import AxsCatalog\n return self.filter(self._df[AxsCatalog.DUP_COLNAME] == 0)", "def drop_nan_columns(arrays):\n\n # Keep all column indices\n not_nan_filter = ones(len(arrays[0]), dtype=bool)\n\n # Currently keeping all columns!\n\n # Keep column indices without missing value in all arrays\n # for a in arrays:\n # not_nan_filter &= ~isnan(a)\n\n return [a[not_nan_filter] for a in arrays]", "def test_column_presence(self):\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )", "def obj_df(df):\n mask = np.array(df.dtypes == 'object')\n df_obj = df.iloc[:, mask]\n return df_obj", "def filter_empty_genes(data, *extra_data):\n gene_sums = np.array(utils.matrix_sum(data, axis=0)).reshape(-1)\n keep_genes_idx = gene_sums > 0\n data = select.select_cols(data, *extra_data, idx=keep_genes_idx)\n return data", "def _remove_non_informative_rows(self, df, threshold):\n df_tmp = pd.DataFrame()\n n_features = len(df.columns)\n # calculating ratio of rows that have more than \"ratio\" missing values\n 
df_tmp['ratio'] = df.apply(lambda row: row.isnull().sum()/n_features, axis='columns')\n\n # kick too noisy rows\n return df[df_tmp['ratio'] <= threshold]", "def drop_columns(self, columns):\n dframe = self.dframe(keep_parent_ids=True)\n self.replace_observations(dframe.drop(columns, axis=1))", "def test_structural_remove_columns_all_1_0(self):\n cp = Plotter.from_smiles(['CCCC', 'CCCC'], sim_type=\"structural\")\n self.assertTrue(cp._Plotter__df_descriptors.empty)", "def drop_columns(*, df, columns_to_drop, verbose=True):\n \n assert type(df)==pd.DataFrame, \"please provide df in pandas dataframe format\"\n df = df.copy()\n \n # find unique values in a list, just in case I made the mistake, \n columns_to_drop = list(pd.Series(columns_to_drop).unique())\n\n # .. info, header, \n if verbose==True:\n print(f\"\"\"Removing {len(columns_to_drop)} columns from df\"\"\") \n else:\n pass\n\n \n # remove columns one by one, \n for i,j in enumerate(columns_to_drop):\n try:\n df.drop(columns=[j], axis=1, inplace=True)\n if verbose==True:\n print(f\"{i} removing: {j}, ==> new df.shape: {df.shape}\")\n else:\n pass\n \n except:\n if verbose==True:\n print(f\"{i} .... column: {j}, was not found in df, check if name is correct....\")\n else:\n pass\n \n return df", "def remove_zero_features(df,no_zeros = 1):\n thing = df.astype(bool).sum(axis=0) # number of nonzeros in each column\n idx = pd.Index(thing) #Index format\n location = idx.get_loc(no_zeros) # Set all elements that are 1.0 to True, rest to False.\n loc_of_one = np.asarray(np.nonzero(location)) #Array of columns with only one nonzero element\n loc_of_one = loc_of_one[0]\n df_new = df.drop(df.columns[loc_of_one], axis = 1) # New reduced dataframe\n return df_new", "def strip_ds(ds):\n if 'brain' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'brain'), :]\n print('excluded the rest of the brain from the dataset')\n if 'overlap' in np.unique(ds.sa.all_ROIs):\n ds = ds[(ds.sa.all_ROIs != 'overlap'), :]\n print('excluded overlap from the dataset')\n return ds", "def sanitize_data(data: pd.DataFrame) -> pd.DataFrame:\n # discard nan values\n data.dropna(inplace=True)\n\n # Create conformers\n data['molecules'].apply(lambda mol: AllChem.EmbedMolecule(mol))\n\n # Discard molecules that do not have conformer\n LOGGER.info(\"Removing molecules that don't have any conformer.\")\n data = data[data['molecules'].apply(lambda x: x.GetNumConformers()) >= 1]\n\n return data", "def filter(df: pd.DataFrame = pd.DataFrame()):\n if df.empty:\n df = read()\n print('Filtering data...')\n df = df.dropna()\n df2 = pd.DataFrame()\n df2['Longitude'] = df['Longitude']\n df2['Latitude'] = df['Latitude']\n df2['Month'] = df['Date'].dt.strftime('%m').astype(int)\n df2['Day'] = df['Date'].dt.strftime('%d').astype(int)\n df2['Day_of_Week'] = df['Day_of_Week']\n df2['Time'] = np.array([t.timestamp() for t in df['Time']]) - df['Time'].min().timestamp()\n df2['Weather_Conditions'] = df['Weather_Conditions']\n return pd.get_dummies(df2)", "def drop_missing_values_in_dataframe(dataframe):\r\n return dataframe.dropna()", "def drop_transafers(df):\n return df.filter(~(df.valor == 0))", "def drop_nonserious_rows(table, column_name):\n # encode table's nan as None\n for column in table.labels:\n encode_nans(table, column)\n full_df = table.to_df()\n start_idx = table.column_index(column_name)\n tbl = table.select(range(start_idx, table.num_columns))\n df = tbl.to_df()\n na_df = df.notna()\n full_df = full_df[np.array(na_df.apply(np.sum, axis=1) != 0)]\n return 
Table.from_df(full_df)", "def get_cols(df):\n meta = get_metafeatures(df)\n categorical_columns = meta.loc[meta['type'] == 'object', 'column'].tolist()\n cols_to_drop = meta.loc[meta['missing'] > 0.5, 'column'].tolist()\n logging.debug('%s categorical columns found', len(categorical_columns))\n logging.debug('%s columns will be dropped', len(cols_to_drop))\n return categorical_columns, cols_to_drop", "def test_drop_zero_variance_columns_considering_NA_will_not_drop_any_column(data):\n step = DropZVColumnsStep()\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' in bdf.columns", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def test_drop_zero_variance_on_subset_columns_with_zv_removals(data):\n step = DropZVColumnsStep(['released', 'episodes'], naomit=True)\n bdf = step.prepare(data).bake(data)\n\n assert 'name' in bdf.columns\n assert 'released' in bdf.columns\n assert 'episodes' not in bdf.columns", "def trim_all_columns(df):\n trim_strings = lambda x: x.strip() if isinstance(x, str) else x\n return df.applymap(trim_strings)", "def remove_reserved_keys(df, exclude=[]):\n reserved_keys = __column_intersect(\n df, BAMBOO_RESERVED_KEYS).difference(set(exclude))\n\n return df.drop(reserved_keys, axis=1)", "def remove_rows_without_feature(df, feature):\n return df[np.isfinite(df[feature])]", "def deletingNaNs(df):\n # start_ time.time()\n df_old = df.copy()\n df.dropna(axis=1, how='any', inplace=True)\n for key in df_old:\n if str(key) not in df:\n print('Deleted ', key)\n # end_time time.time()\n #print('Time to run deletingNaNs: ', end_time - start_time)\n return df", "def select_variables(df, dtype=\"numeric\"):\n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n if dtype == \"numeric\":\n subset = df.copy().select_dtypes(include = numerics)\n else:\n subset = df.copy().select_dtypes(include != numerics)\n return(subset)", "def drop_columns(df: DataFrame, *columns_to_drop: str) -> DataFrame:\n return df.drop(*columns_to_drop)", "def others(self) -> List[str]:\n exclude = self._obj._names[\"covariates\"] + DATA_COLS\n return [col for col in self._obj.columns if col not in exclude]", "def get_columns_with_all_nulls(X, columns_to_check='all', rows_to_scan='all'):\n rows_to_scan = get_rows_to_scan(rows_to_scan, X.shape[0])\n columns_to_check = get_list_of_columns_to_check(columns_to_check, X.columns)\n mask = np.array(X[columns_to_check][:rows_to_scan].count() == 0)\n return list(np.array(columns_to_check)[mask])", "def filter_schema(schema):\n for column, column_schema in schema.iteritems():\n if column_schema.get(CARDINALITY):\n del column_schema[CARDINALITY]\n schema[column] = column_schema\n\n return schema", "def _prune_unused_columns(\n block: Block,\n key: str,\n aggs: Tuple[AggregateFn],\n ) -> Block:\n prune_columns = True\n columns = set()\n\n if isinstance(key, str):\n columns.add(key)\n elif callable(key):\n prune_columns = False\n\n for agg in aggs:\n if isinstance(agg, _AggregateOnKeyBase) and isinstance(agg._key_fn, str):\n columns.add(agg._key_fn)\n elif not isinstance(agg, Count):\n # Don't prune columns if any aggregate key is not string.\n prune_columns = False\n\n block_accessor = BlockAccessor.for_block(block)\n if (\n prune_columns\n and isinstance(block_accessor, TableBlockAccessor)\n and block_accessor.num_rows() > 0\n ):\n return block_accessor.select(list(columns))\n else:\n return block", "def to_df(self):\n from ..df import 
DataFrame\n\n return DataFrame(self.table).filter_parts(self)", "def remove_empty_rows(dataframe: pd.DataFrame, column_name: str):\n original_size = len(dataframe)\n dataframe[column_name].replace(\" \", np.nan, inplace=True)\n dataframe[column_name].replace(\"\", np.nan, inplace=True)\n dataframe.dropna(subset=[column_name], inplace=True)\n dataframe.reset_index(drop=True, inplace=True)\n new_size = len(dataframe)\n print(f\"A total of {original_size - new_size} rows were dropped\")", "def exclude_cols(self, *_, **__) -> Tuple[str, ...]:", "def drop_uniform_slice_from_dataframe(df, value, axis=0):\n\n if axis == 0:\n dropped = (df == value).all(axis=0)\n if any(dropped):\n print('Removed {} column index(ices) whose values are all {}.'.\n format(dropped.sum(), value))\n return df.ix[:, ~dropped]\n\n elif axis == 1:\n dropped = (df == value).all(axis=1)\n if any(dropped):\n print('Removed {} row index(ices) whose values are all {}.'.format(\n dropped.sum(), value))\n return df.ix[~dropped, :]", "def remove_null_cols(df, thresh=0.08):\n \n # look at this\n # df.dropna(thresh=int(df.shape[0] * .9), axis=1)\n pct_null = df.isnull().sum() / len(df)\n missing_features = pct_null[pct_null > thresh].index\n return df.drop(missing_features, axis=1)", "def drop_dups(df,col_names=None):\n return df.dropDuplicates()", "def get_x_dataframe_without_time_column(self):\n if self.x_columns is None:\n raise Exception\n\n return utilities.impute_missing_data(self.dataframe.loc[:, self.x_columns])", "def columns_to_fix(df):\n return [col for col in df.columns.values if any([k in col and v in col for k, v in symmetric_dihedrals.items()])]", "def drop_duplicates_rows(df: DataFrame, *criteria_columns: str) -> DataFrame:\n\n if criteria_columns:\n return df.dropDuplicates(subset=[*criteria_columns])\n else:\n return df.dropDuplicates()", "def clean_other_dfs(df: pd.DataFrame) -> pd.DataFrame:\n cols = map(convert_to_snake_case, df.columns)\n df.columns = cols\n return df", "def filter(df, predicate):\n if not df:\n return []\n\n return [row for row in df if predicate(row)]", "def all(self, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.all)(self, **kwargs)", "def _filter(self, col: str, val: Any) -> pd.DataFrame:\n return self._df[self._df[col] == val]", "def all_minimal():\n results = DatalabData.filter_minimal(None, None, None, False)\n return results" ]
[ "0.7230408", "0.7230408", "0.7211834", "0.7153902", "0.7082774", "0.70773596", "0.69314176", "0.6903499", "0.6895523", "0.66686505", "0.6667085", "0.66075593", "0.6605839", "0.658763", "0.6533146", "0.6488316", "0.6451615", "0.64211375", "0.64115214", "0.64103115", "0.64027435", "0.6392951", "0.6389337", "0.63637966", "0.6362447", "0.6342778", "0.63378257", "0.6279769", "0.6268208", "0.62176996", "0.62144923", "0.61959064", "0.6188411", "0.6186195", "0.6083078", "0.6049046", "0.6030395", "0.6030165", "0.60163826", "0.6006826", "0.5979239", "0.5975324", "0.5962083", "0.5942078", "0.59314305", "0.5910953", "0.5909594", "0.5901926", "0.59002554", "0.5892034", "0.5890895", "0.5858349", "0.5848257", "0.5836999", "0.58175725", "0.5811415", "0.58061206", "0.57774854", "0.5773468", "0.5772674", "0.5765508", "0.5755353", "0.57507044", "0.574601", "0.57432014", "0.57187873", "0.5715498", "0.5699204", "0.5698963", "0.56967664", "0.5690544", "0.5688389", "0.5683852", "0.5679305", "0.56731766", "0.5664012", "0.5651397", "0.5650162", "0.5642428", "0.5635777", "0.5629364", "0.5622539", "0.5617888", "0.56121105", "0.56027126", "0.55980283", "0.55970615", "0.5586228", "0.55791193", "0.5570312", "0.5569389", "0.5566887", "0.5550225", "0.55497813", "0.5549534", "0.55344313", "0.5531701", "0.5530661", "0.5528998", "0.5526387" ]
0.5810948
56
Find all possible values of a column in the pandas.DataFrame list
def dfs_all_values(dfs, column):
    values = []
    # loop over all (pandas.DataFrame, str) pairs
    for df in dfs:
        values.extend(df[column].tolist())
    # set() removes duplicates
    # sorted() converts Set to List and sort the elements
    return sorted(set(values))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getValuesForColumn(self, columnname):\n return list(self.abundance_df[columnname].unique())", "def get_values(df):\n return df.columns.values.tolist()", "def get_binary_values(data_frame):\n all_columns = pandas.DataFrame( index = data_frame.index)\n for col in data_frame.columns:\n data = pandas.get_dummies(data_frame[col], prefix=col.encode('ascii', 'replace'))\n all_columns = pandas.concat([all_columns, data], axis=1)\n return all_columns", "def get_binary_values(data_frame):\n all_columns = pandas.DataFrame( index = data_frame.index)\n for col in data_frame.columns:\n data = pandas.get_dummies(data_frame[col], prefix=col.encode('ascii', 'replace'))\n all_columns = pandas.concat([all_columns, data], axis=1)\n return all_columns", "def get_values(self, col) :\n\n if col not in self.cols :\n raise Exception('Column %s not in data' % col)\n\n select_sql = 'SELECT \"%s\" FROM \"%s\" ORDER BY __ROWID ASC' % (col, self.name)\n cur = self.con.cursor()\n cur.execute(select_sql)\n vs = cur.fetchall()\n return [v[0] for v in vs]", "def create_value_set(self, col):\n\n value_set = set()\n\n for df in self:\n value_set.update(df[col])\n return value_set", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n values: list[str] = []\n for row in table:\n values.append(row[column])\n return values", "def normalize(column):\n value_set = set(column)\n unique_count = len(value_set)\n if unique_count == 1:\n # skip everything in this column. \n return []\n elif unique_count == 2:\n zero = list(value_set)[0]\n one = list(value_set)[1]\n normalized_column = []\n for value in column:\n normalized_column.append(1 if value == one else 0)\n return [normalized_column]\n else: \n all_values = list(value_set)\n normalized_column = []\n\n # expand into multiple columns \n for index in range(len(all_values)):\n normalized_column.append([])\n\n for value in column:\n for index in range(len(all_values)):\n normalized_column[index].append(1 if value == all_values[index] else 0)\n \n return normalized_column", "def flat_set_from_df(df, col, condition=None):\n if condition is not None:\n df = df[condition]\n lists = df[col].tolist()\n return set([item for sublist in lists for item in sublist])", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n result: list[str] = []\n for row in table:\n item: str = row[column]\n result.append(item)\n\n return result", "def column_values(table: list[dict[str, str]], column: str) -> list[str]:\n result: list[str] = []\n for row in table:\n item: str = row[column]\n result.append(item)\n\n return result", "def values(self, cols=None) :\n\n if not cols or cols == self.cols :\n return self.data\n\n def extractor(col) :\n if col in self.cols :\n return self.data[self.cols.index(col)]\n else :\n return None\n \n return [extractor(col) for col in cols]", "def column_select(df,returnList = [\"x\",\"y\"]):\n df = df.sort_values(by = 'frame_id')\n return [ list(df[k]) for k in returnList]", "def create(df,column,list_):\n return df[df[column].isin(list_)]", "def column_values(table: list[dict[str, str]], column_name: str) -> list[str]:\n column_values: list[str] = []\n for row in table:\n item: str = row[column_name]\n column_values.append(item)\n return column_values", "def test_multiple(self):\n df = self.df.copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)", "def unique_values(df):\n cols = list(df.columns)\n\n for col in cols:\n uniques = (df[col]).unique()\n print(f\"{len(uniques)} unique items in {col}: 
{df[col].loc[0]},{df[col].loc[1]}, {df[col].loc[2]}...\")", "def list_unique(df):\n\n # print unique values of each column\n for col in df.columns:\n print(f\"{col}:\")\n print(f\"{list(df[col].unique())}\\n\")", "def unique (a_data,a_column) :\n return list(__np.unique(a_data[a_column]))", "def getColVals(self, col=None, include_nones=None):\n if col is None or col < 1 or col > self.nCol:\n raise SelectError(f\"bad col number {col}\")\n \n vals = []\n for ri in range(self.nRow):\n row = ri + 1\n val = self.getCellVal(row=row, col=col)\n if include_nones or not self.isEmpty(val):\n vals.append(val)\n return vals", "def powerset(iterable):\n try:\n s = list(iterable)\n column_names = list(chain.from_iterable(combinations(s, r) \n for r in range(len(s)+1)))[1:]\n col_list = []\n for i in range(6,0,-1):\n col_list += [item for item in column_names if len(item)==i]\n return col_list\n except:\n my_message = \"\"\"\n ERROR - STEP 2 (MASTER): FAILED MAKING THE POWERSET OF TECHNOLOGIES\n \"\"\"\n my_message = ' '.join(my_message.split()) + '\\n' + traceback.format_exc()\n print(my_message)\n return None", "def column_values_in_list(col, test_list):\n test = np.array([c_i in test_list for c_i in col])\n return test", "def unique_column_values(rows, column_name):\n # declare a set that guarantees no duplicates in the answer\n value_set = set()\n # for all rows, add the value of indicated column to the set\n for row in rows:\n \tvalue_set.add(row[column_name])\n return value_set", "def unique_column_values(rows, column_name):\r\n\r\n values = [] #Create an empty list\r\n for row in rows: #Iterate through each row\r\n values.append(row[column_name]) \r\n values = set(values)\r\n return values", "def getLegalVals(self, row=None, col=None): # Returns: array of legal values\n if (row is None or row < 1 or row > self.nRow\n or col is None or col < 1\n or col > self.nCol): # Safety check\n return [] \n \n usedH = {} # Add to list as found\n # Allow EMPTY\n row_vals = self.getRowVals(row)\n for row_val in row_vals:\n usedH[row_val] = 1\n\n col_vals = self.getColVals(col)\n for col_val in col_vals:\n usedH[col_val] = 1\n\n sq3_vals = self.getSq3Vals(row, col)\n for sq3_val in sq3_vals:\n usedH[sq3_val] = 1\n\n \n legal_vals = []\n for n in range(1, self.nRow+1):\n if n not in usedH:\n legal_vals.append(n) \n \n if SlTrace.trace(\"any\"):\n lvstrs = list(map(str, sorted(legal_vals)))\n SlTrace.lg(f\"getLegals(row={row}, col={col} = \"\n + \", \".join(lvstrs))\n \n return sorted(legal_vals)", "def values(self):\n return [entry.value for entry in self.table if entry.value is not None]", "def get_matching_columns(self, columns):\n result = []\n for column in columns:\n if self.match(column):\n result.append(column)\n return result", "def get_possible_values(self):\n possible_values = {}\n for f in self.__features:\n possible_values[f] = list(self.__data[f].unique())\n return possible_values", "def get_needed_columns(df, list_of_columns):\n return df[list_of_columns]", "def as_list(df: pandas.DataFrame, row=-1) -> list:\n if df is None:\n return []\n if row >= 0:\n rec = []\n for col in range(0, 13):\n rec.append(df.iat[row, col])\n return rec\n recs = []\n for row in range(df.shape[0]):\n recs.append(as_list(df, row=row))", "def iterall(self):\r\n return (column for name, column in self.iteritems())", "def get_unique(self):\n unique_values = len(self.df[self.col_name].unique())\n return unique_values", "def get_sample_numbers(cons_df: pd.DataFrame) -> Set[int]:\n\n all_cols = list(cons_df.columns)\n sample_nbrs 
= set([int(col.split('_')[-1]) for col in all_cols[1:] if not col.endswith('cf')])\n return sample_nbrs", "def indices_of(self, col_name, value):\n return list(self._obj[self._obj[col_name] == value].index\n ) if col_name in self._obj.columns else None", "def test_column_data(self):\n c = Column('foo', range(3))\n self.assertEqual(list(c), [0, 1, 2])", "def get_values(self):\n return set(self._table.keys())", "def GetColumnsOption(self, data) :\n indices = [ int(x.replace(self.label, '')) for x in data.columns if self.label in x and x.replace(self.label, '')!='' ]\n return indices", "def __column_intersect(df, list_):\n return set(list_).intersection(set(df.columns.tolist()))", "def get_unique_elements(column_name:str) -> list:\n c=data_to_cluster[column_name].values.tolist()\n cuisines_list=[]\n for i in range(len(c)):\n item=ast.literal_eval(c[i])\n for j in range(len(item)):\n cuisines_list.append(item[j][0])\n c_s=set(cuisines_list)\n cuisines=list(c_s)\n return cuisines", "def possible_values(self, seq):\n for element in seq:\n if element:\n return element\n return False", "def get_dummies_list(self, \n cols=['elite'],\\\n drop_=True):\n for col in cols:\n print \"Pre-processing \" + col + \"...\"\n temp = pd.get_dummies(self.df[col].apply(pd.Series).stack(),drop_first=True)\\\n .astype(int).sum(level=0).astype(int)\n # temp.columns.apply(str).apply(lambda x: col + \"_\" + x)\n if drop_:\n self.df.drop(col,axis = 1, inplace=True)\n self.df = pd.concat([self.df, temp],axis=1)", "def possible_values(self) -> Set[int]:\n return {x for x in SudokuTile.UNIVERSE_OF_TILE_VALUES if\n (x not in self._row) and\n (x not in self._column) and\n (x not in self._box)}", "def get_column_multi(self, db, table, col_short):\n ret_col = []\n col_short_patterns = col_short.lower().split(',')\n\n for col_pattern in col_short_patterns:\n col_name = self.get_column_single(db, table, col_pattern)\n if col_name and col_name not in ret_col:\n ret_col.append(col_name)\n continue\n\n return ret_col", "def get_all_values(self):\n return self.display_table.get_all_values(root=self.display_table_root,include=self.params)", "def get_criteria_values(self) -> list or None:\n\n # query_string = 'SELECT * FROM [{}];'.format(self.settings.excelSheetName)\n query_string = 'SELECT DISTINCT [{}] FROM [{}];'.format(self.settings.excelCriteriaSelector,\n self.settings.excelSheetName)\n\n if self.connected and self.settings.excelSheetName:\n cursor = self.workbook.cursor()\n if cursor:\n criteria_values = []\n for row in cursor.execute(query_string):\n criteria_values.append(row[self.settings.excelCriteriaSelector.lower()])\n cursor.close()\n return criteria_values\n\n return None", "def potential_splits(self, potential_xj):\r\n \r\n self.cur.execute(\"SELECT DISTINCT \" + potential_xj + \" FROM \" + self.table_name + \";\")\r\n potential_splits = [ii[0] for ii in self.cur.fetchall()]\r\n return potential_splits", "def unique_vals(rows, col):\n return set([row[col] for row in rows])", "def getObjectComponents(df):\n return df.loc[getObjectComponentIndexes(df)]", "def filter_by_isin(df: pd.DataFrame, column: str, values: Iterable) -> pd.DataFrame:\n # First, create a \"map\" series from all possible values in the column => whether they should pass the filter\n all_ids = df[column].unique()\n is_id_relevant = pd.Series(np.zeros(len(all_ids)), index=all_ids).astype('bool') # Default false\n is_id_relevant.loc[values] = True\n\n # Create a boolean mask for column, based on the mapping above. 
Grab the raw array.\n mask = is_id_relevant[df[column]].values\n # Apply mask\n return df[mask]", "def getListOfPossibleSpeciesFeatureValues(self, *args):\n return _libsbml.SpeciesFeatureType_getListOfPossibleSpeciesFeatureValues(self, *args)", "def competitions(self) -> DataFrame[Any]:", "def convert_col_to_list(df, col='Question and Answer'):\n text = df[col].values.tolist()\n return text", "def get_returns_columns(df: pd.DataFrame) -> list:\n return [col for col in df.columns if '_period_return' in col]", "def modify_df(dataframe: pd.DataFrame) -> list[pd.DataFrame]:\n\n # Extract the elements and number of them from formula\n def grab_elements_and_number_of(formula):\n element_tup = tuple(np.sort(re.findall(r\"([A-Z][a-z]*)\", formula)))\n return element_tup, len(element_tup)\n\n dataframe[\"element_tup\"], dataframe[\"number_elements\"] = zip(\n *dataframe.formula.apply(grab_elements_and_number_of)\n )\n\n # Filter the df to only include unary, binary materials\n dataframe = dataframe[\n (dataframe[\"number_elements\"] == 1) | (dataframe[\"number_elements\"] == 2)\n ]\n\n # Create a df of minimum values\n min_E_df = dataframe.groupby(by=[\"element_tup\"]).agg({\"energy\": \"min\"})\n min_E_df = min_E_df.reset_index()\n\n return [dataframe, min_E_df]", "def vectors_from_dataframe(*columns):\n return lambda df: [np.array(v) for v in zip(*[list(df[x].values) for x in columns])]", "def values(self):\n return [row.values for row in self]", "def get_value_set(self, tag_pattern=None, normalise=False):\n value_set = set([])\n if tag_pattern:\n tag_pattern = TagPattern.parse(tag_pattern)\n for row in self:\n if tag_pattern:\n new_values = row.get_all(tag_pattern)\n else:\n new_values = row.values\n if normalise:\n new_values = [hxl.datatypes.normalise(s) for s in new_values]\n else:\n new_values = [hxl.datatypes.normalise_space(s) for s in new_values]\n value_set.update(new_values)\n return value_set", "def _get_col_subset(self, X, cols):\n return_vector = False\n if isinstance(cols, string_types):\n return_vector = True\n cols = [cols]\n\n if isinstance(X, list):\n X = [x[cols] for x in X]\n X = pd.DataFrame(X)\n\n elif isinstance(X, DataWrapper):\n # if it's a datawrapper, unwrap it\n X = X.df\n\n if return_vector:\n t = X[cols[0]].values\n else:\n t = X[cols].values\n\n return t", "def get_data_labels(answer_mapping_df, column):\n labels = []\n for i in answer_mapping_df[column].columns.values:\n labels.append(answer_mapping_df.xs((column, i), level=('q_code', 'a_code'), axis=1).iloc[0,0])\n return labels", "def _get_column_values(self, col: str) -> ndarray:\n dtype, loc = self._get_col_dtype_loc(col) # type: str, int\n return self._data[dtype][:, loc]", "def find_options(self, current_row, current_column, subgrid_number)->list:\n row = self.number_for_row\n column = self.number_for_column\n subgrid = self.subgrid[subgrid_number].avaliable_numbers\n options = list(set(row[current_row]) & set(column[current_column]) & set(subgrid))\n options = [x for x in options if x != 0]\n return options", "def columns_values(self):\r\n return self._columns_values", "def values(self):\n return [ self[x] for x in self ]", "def get_all_contests(data_frame) -> list:\n return [contest for contest in data_frame.columns if contest != 'Ballot Style']", "def data(self) -> List[List[Any]]:\n\n column_wise = [column.values for column in self.plaincolumns]\n row_wise = [list(row) for row in zip(*column_wise)]\n\n return row_wise", "def itervaluerefs(self):\r\n return self.data.itervalues()", "def 
get_unique_values(df, colname):\n\treturn list(dict(df[colname].value_counts(ascending=False, dropna=False)).keys())", "def _sample_using_a_list(\n self,\n column_name: str,\n value_list: list,\n ):\n return sa.column(column_name).in_(value_list)", "def columns(self) -> java.util.Collection:\n ...", "def get_all_values(self, sheet_name):\n case_list = []\n head = self.head(sheet_name)\n case = namedtuple('case', head)\n all_values = self.wb[sheet_name].iter_rows(min_row=self.excel_min_row(sheet_name) + 1,\n max_col=self.excel_max_col(sheet_name),\n values_only=True)\n for value in all_values:\n case_list.append(case(*value))\n return case_list", "def getcols(d,l,*args):\n ret = []\n for i in range(len(args)):\n ret = ret + [getcol(d[args[i]],l)]\n return ret", "def _select_data(\n self, db: str, table: str, column_filters: Dict[str, str]\n ) -> List[List]:\n pass", "def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]", "def unique_vals(client, proj, dataset, table, col_name):\n if not client.check_table(dataset, table):\n return []\n res = run_bq_query(client, \"SELECT %s FROM [%s:%s.%s] GROUP BY %s ORDER BY %s\" % (col_name, proj, dataset, table, col_name, col_name), 120)\n return [rec[col_name] for rec in res]", "def check_base_fields(df,base_fields):\n emp_list = []\n for item in base_fields:\n if item not in list(df.columns):\n emp_list.append(item)\n\n return emp_list", "def values(self) -> List:\n pass", "def columns(self, model=None):\n column = self.column(model=model)\n if column:\n yield column\n\n check = self.__value\n if not isinstance(check, (list, set, tuple)):\n check = (check,)\n\n for val in check:\n if isinstance(val, (Query, QueryCompound)):\n for col in val.columns(model):\n yield col", "def uninformative_columns(df: pd.DataFrame) -> Iterator[Tuple[str, Any]]:\n # TODO: support DataFrames where df.columns is a MultiIndex\n for column in df.columns:\n series = df[column]\n series_iter = iter(df[column])\n try:\n exemplar = next(series_iter)\n except StopIteration:\n # no rows => nothing to check :|\n continue\n # nan is a special case, since np.nan != np.nan\n if series.dtype == np.float and np.isnan(exemplar):\n if all(np.isnan(item) for item in series_iter):\n yield column, exemplar\n elif all(item == exemplar for item in series_iter):\n yield column, exemplar", "async def values(self, *args, distinct=False, flat=False, group_by=None):\n if flat and len(args) != 1:\n raise ValueError(\n \"Values with flat=True can only have one param\")\n if args:\n model = self.proxy.model\n columns = []\n for col in args:\n if isinstance(col, str):\n col = resolve_member_column(model, col)\n columns.append(col)\n q = self.query('select', *columns)\n else:\n q = self.query('select')\n if group_by is not None:\n q = q.group_by(group_by)\n if distinct:\n q = q.distinct()\n cursor = await self.proxy.fetchall(q, connection=self.connection)\n if flat:\n return [row[0] for row in cursor]\n return cursor", "def potential_values(self) -> Set[Hashable]:\n\t\treturn set(self.iter_potential_values())", "def identify_columns(structure: dict):\n\tknown_columns = list()\n\n\t# collect columns\n\tfor day in structure['data']:\n\t\tfor key in day.keys():\n\t\t\tif key not in known_columns:\n\t\t\t\tknown_columns.append(key)\n\n\treturn known_columns", "def get_unique_tags(df):\n tags = []\n\n for index, row in df.iterrows():\n tags = list(set(tags + ast.literal_eval(row.tags)))\n\n pdb.set_trace()", "def searchFeats(tree):\n if tree.results is None:\n #If 
I am at the branch, return my branching column and the columns of my\n #descendant branches.\n return set([tree.col]).union(searchFeats(tree.tb)).union(searchFeats(tree.fb))\n else: #If I am at a leaf, return an empty set\n return set()", "def values(self):\n return list(item.value for item in self.mechanisms)", "def create_quanti_cols(df: pd.DataFrame) -> list:\n\n # create a dictionary that contains datatype of each column\n dtypeDict = dict(df.dtypes)\n # create a list of column names that contains only quantitative data\n quanti_cols = []\n quali_cols = []\n for key, value in dtypeDict.items():\n if value == \"float64\" or value == \"int64\" or value == \"uint8\":\n quanti_cols.append(key)\n elif value == \"object\" or value == \"bool\":\n quali_cols.append(key)\n else:\n print(f\"No such dtypes values yet. Please add {value} in the function\")\n if len(quali_cols) == 1:\n return quanti_cols, quali_cols[0]\n else:\n return quanti_cols, quali_cols", "def get_singletons(self):\n singleton_list = [] # List holding singleton positions (row, col)\n for row in range(9):\n for col in range(9):\n if len(self.possible_values[row][col]) == 1 and self.final_values[row][col] == 0:\n singleton_list.append((row, col)) # If it is vacant and has only one possible value append it\n return singleton_list", "def multi_column_iterator(df, colidx):\n for _, values in df.iterrows():\n yield tuple([values.iloc[i] for i in colidx])", "def _get_list_select(self, column, key=None):\n if key is None:\n elements = [column]\n else:\n elements = [column, key]\n\n select = []\n for elem in elements:\n dot = elem.find('.')\n\n if dot >= 0:\n select.append(column[dot + 1:])\n else:\n select.append(elem)\n\n return select", "def values(self):\n return [i.value for i in self.value]", "def values(self):\n self._values = [[cell for cell in row]\n for row in self.sheet._op.values]\n # self._values = [cell for row in self.sheet._op.values for cell in row]\n return self._values", "def possibilities(board):\n return board[np.where(board == 0)]", "def columns(self):\n return self.__column_list", "def splittable_variables(self) -> List[int]:\n #print(\"enter bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n \n for i in range(0, self._n_features):\n if self._splittable_variables[i] is None:\n self._splittable_variables[i] = is_not_constant(self.get_column(i))\n \n output = [i for (i, x) in enumerate(self._splittable_variables) if x is True] \n #print(\"-exit bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n return output", "def list_value(self) -> global___Expression.RepeatedValue:", "def same_as(self, rows: List[Row], column: Column) -> List[Row]:\n return_list: List[Row] = []\n if not rows:\n return return_list\n cell_value = rows[0].values[column.name]\n for table_row in self.table_data:\n new_cell_value = table_row.values[column.name]\n if new_cell_value is None or not isinstance(new_cell_value, type(cell_value)):\n continue\n if new_cell_value == cell_value:\n return_list.append(table_row)\n return return_list", "def derived_columns_from_col(self, column: str) -> List[str]:\n derived_columns = []\n operations = self.operations_from_original_column(column)\n\n for operation in operations:\n if operation.derived_columns is not None:\n derived_columns.extend(operation.derived_columns)\n\n return derived_columns", "def get_uniqueZIP(df):\n return df.ZIP.unique().tolist()", "def valuerefs(self):\r\n return self.data.values()", "def list_data(self, as_strings=False):\n if self.df is None:\n return [] 
\n if as_strings:\n return [str(col) for col in self.df.columns]\n else:\n return list(self.df.columns.values)", "def collect_columns():\n return ((x, y) for x in range(72) for y in range(x + 9, 81, 9))" ]
[ "0.6171805", "0.6113757", "0.5891988", "0.5879397", "0.58606875", "0.5812586", "0.57938266", "0.5787178", "0.57815653", "0.5762439", "0.5762439", "0.56766194", "0.56144273", "0.5601676", "0.5587507", "0.5523211", "0.55008084", "0.5450795", "0.5401311", "0.5399768", "0.5390941", "0.53854376", "0.53706497", "0.53641474", "0.5336809", "0.53178674", "0.5315018", "0.5261834", "0.5232556", "0.5215963", "0.52067995", "0.5189249", "0.51807314", "0.51803356", "0.51796204", "0.51703554", "0.5169155", "0.5159863", "0.51522994", "0.51503915", "0.5137492", "0.5117347", "0.5114497", "0.51099867", "0.5108429", "0.5098281", "0.5092918", "0.5080469", "0.50772536", "0.50750583", "0.5058026", "0.5050726", "0.50492245", "0.5032458", "0.50289047", "0.5023505", "0.5018752", "0.50102144", "0.49994266", "0.4995349", "0.49641064", "0.49637222", "0.49589136", "0.49572968", "0.49528337", "0.49428815", "0.4942572", "0.49417487", "0.4927669", "0.49205482", "0.49150968", "0.48977417", "0.4886366", "0.48827532", "0.48818782", "0.48780861", "0.48777166", "0.4877588", "0.4874683", "0.48718584", "0.4865273", "0.48636124", "0.48612034", "0.4856821", "0.48566607", "0.48510742", "0.4849835", "0.4828566", "0.4822694", "0.48144633", "0.48143354", "0.4811204", "0.4810101", "0.4806187", "0.48011974", "0.48000956", "0.47969958", "0.4795167", "0.47917622", "0.478515" ]
0.6156369
1
Draw multiple lines y(x) using data from the dfs list on the ax subplot.
def draw_plot(ax, dfs, legend, x, y, xscale, yaxis_max):
    xticks = dfs_all_values(dfs, x)
    # loop over all pandas.DataFrame objects
    for df in dfs:
        # setting the x-column as an index is required to draw the y-column
        # as a function of x argument
        df = df.set_index(x)
        # plot line on the subplot
        df[y].plot.line(ax=ax, rot=45, marker='.')
    if xscale == "linear":
        ax.set_xscale(xscale)
    else:
        ax.set_xscale(xscale, base=2)
        ax.xaxis.set_major_formatter(ScalarFormatter())
    ax.set_xticks(xticks)
    ax.set_xlabel(get_label(x))
    ax.set_ylabel(get_label(y))
    ax.set_ylim(bottom=0)
    if yaxis_max is not None:
        ax.set_ylim(top=float(yaxis_max))
    ax.legend(legend, fontsize=6)
    ax.grid(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot(x, y, *dfs):\n ax = None\n for df in dfs:\n ax = df[[x, y]].set_index(x).plot(kind='line', ylim=(0, None), xlim=(0, None), ax=ax)", "def update_plot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n for line in self.lines: \n ax.draw_artist(line)", "def replot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n line.set_xdata(self.data[i].x)\n for line in self.lines: \n ax.draw_artist(line)", "def plot_datasets(datasets):\n\n\t# plt.grid(True)\n\n\tfor ds in datasets:\n\t\t(f, ax) = plt.subplots()\n\n\t\tax.grid(True)\n\n\t\tif 'xl' in ds:\n\t\t\tax.set_xlabel(ds['xl'])\n\t\tif 'yl' in ds:\n\t\t\tax.set_ylabel(ds['yl'])\n\n\t\tif 'xl' in ds and 'yl' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl'])\n\t\t\tf.canvas.set_window_title(title)\n\n\t\tif 'x' in ds:\n\t\t\ttitle = \"%s from %s\" % (ds['yl'], ds['xl']) if 'title' not in ds else ds['title']\n\t\t\tf.canvas.set_window_title(title)\n\t\t\tmarker = 'y1m' in ds and ds['y1m'] or None\n\t\t\tax.plot(ds['x'], ds['y'], label=ds['yl'], marker=marker)\n\t\tif 'x2' in ds:\n\t\t\t# label = \"y2\" if 'y2l' not in ds else ds['y2l']\n\t\t\tlabel = 'y2l' in ds and ds['y2l'] or 'y2'\n\t\t\tmarker = 'y2m' in ds and ds['y2m'] or None\n\t\t\tax.plot(ds['x2'], ds['y2'], label=label, marker=marker)\n\t\t\tax.legend()\n\t\tif 'x3' in ds:\n\t\t\t# label = \"y3\" if 'y3l' not in ds else ds['y3l']\n\t\t\tlabel = 'y3l' in ds and ds['y3l'] or 'y3'\n\t\t\tmarker = 'y3m' in ds and ds['y3m'] or None\n\t\t\tax.plot(ds['x3'], ds['y3'], label=label, marker=marker)\n\t\t\tax.legend()\n\n\t\tif 'sub' in ds:\n\t\t\tfor sub in ds['sub']:\n\t\t\t\t# ax.set_ylabel(sub['yl'])\n\t\t\t\t# ax.set_xlabel(sub['xl'])\n\t\t\t\t# title = \"%s from %s\" % (sub['yl'], sub['xl']) if 'title' not in sub else sub['title']\n\t\t\t\t# f.canvas.set_window_title(title)\n\n\t\t\t\tlabel = 'yl' in sub and sub['yl']\n\t\t\t\tmarker = 'ym' in sub and sub['ym'] or None\n\t\t\t\tax.plot(sub['x'], sub['y'], label=label, marker=marker)\n\t\t\t\tax.legend()\n\n\t\tax.spines['left'].set_position('zero')\n\t\tax.spines['bottom'].set_position('zero')\n\t\tax.spines['left'].set_smart_bounds(True)\n\t\tax.spines['bottom'].set_smart_bounds(True)\n\n\tplt.show()", "def plot_dat_file(dat_paths: [str]):\n import pandas as pd\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots(1, 3, sharey=\"all\", sharex=\"col\", figsize=(8, 6))\n for i, dat_path in enumerate(dat_paths):\n if i == i:\n skipfoot = 11 + 9\n else:\n skipfoot = 11\n dat_file = pd.read_csv(\n dat_path,\n skiprows=3,\n skipfooter=skipfoot,\n header=None,\n delim_whitespace=True,\n engine=\"python\",\n )\n depth = dat_file.values[:, 0]\n vp = dat_file.values[:, 1]\n vs = dat_file.values[:, 3]\n dens = dat_file.values[:, 5]\n\n ax[0].plot(vp, depth, label=f\"nr {i}\")\n\n ax[1].plot(vs, depth)\n ax[2].plot(dens, depth)\n ax[0].set_ylim(ax[0].get_ylim()[::-1])\n ax[0].legend()\n plt.show()", "def plot_lines(self):\n self.plot(3)", "def multi_line_plot(x_data, y_data, title, x_label, y_label):\n plt.figure(1, (18, 8)) # something, plot size\n plt.subplot(111)\n legend = []\n for i in range(len(x_data)):\n plt.plot(x_data[i], y_data[i])\n legend.append((i+1))\n plt.title(title)\n plt.xlabel(x_label, fontsize=12)\n plt.ylabel(y_label, fontsize=12)\n plt.legend(legend, loc='upper left')\n plt.show()", "def plot(self, *args, **kwargs):\r\n lines = super(RadarAxes, self).plot(*args, **kwargs)\r\n for line in lines:\r\n self._close_line(line)", "def plot(self, 
*args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)", "def plot(self, *args, **kwargs):\n lines = super(RadarAxes, self).plot(*args, **kwargs)\n for line in lines:\n self._close_line(line)", "def plotLines( self ):\n \n ## plot tree in dfs manner\n def plotLines( node_id ):\n\n node = self.mTree.node( node_id )\n\n left = self.mNodeWidthsStart[node_id]\n right = self.mNodeWidthsEnd[node_id]\n height = self.mNodeHeights[node_id] \n\n if right != left and node_id != self.mTree.root:\n self.addElements( self.mDecoratorHorizontalBranches.getElements(\n node_id,\n self.getHeaderWidth() + left,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height ))\n \n\n for s in node.succ:\n\n new_height = self.mNodeHeights[s]\n self.addElements( self.mDecoratorVerticalBranches.getElements(\n node_id,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height,\n self.getHeaderHeight() + new_height ))\n \n TreeTools.TreeDFS( self.mTree, self.mTree.root,\n pre_function = plotLines )", "def multi_plot(data, fname=None):\n for entry in data['data']:\n plt.plot(entry['x'], entry['y'], label=entry['label'])\n\n plt.title(data['title'])\n plt.xlabel(data['x_label'])\n plt.ylabel(data['y_label'])\n\n #plt.legend(loc='best')\n\n Plotter.show(data['title'], fname=fname)", "def replot(self,ax):\n self.XP_Plotter.replot(ax)\n # theoretical lines\n self.lines_theory[0].set_xdata(self.xx)\n self.lines_theory[1].set_xdata(self.xx)\n self.lines_theory[2].set_xdata(self.xx_itpl)\n for line in self.lines_theory: \n ax.draw_artist(line)", "def update_plot(frame):\n global plotdata\n while True:\n try:\n data = q.get_nowait()\n except queue.Empty:\n break\n shift = len(data)\n plotdata = np.roll(plotdata, -shift, axis=0)\n plotdata[-shift:, :] = data\n for column, line in enumerate(lines):\n line.set_ydata(plotdata[:, column])\n return lines", "def plot2D(*dfs, columns=None, figsize=(5, 5), plot_titles=False):\n fig, ax = plt.subplots(figsize=figsize)\n\n for df, color in zip(dfs, cycle(COLORS)):\n X, Y = (df[col] for col in columns)\n plt.scatter(X, Y, c=color, marker=MARKER)\n\n for axis, col in zip(['x', 'y'], columns):\n getattr(ax, f'set_{axis}label')(col)\n\n if plot_titles:\n for df in dfs:\n for i, j, text in zip(df.iloc[:, 0], df.iloc[:, 1], df.index):\n corr = 2\n ax.annotate(text, xy=(i + corr, j + corr))\n\n plt.show()", "def plot_epochs(epochs, y, line):\n ep = np.arange(0, epochs)\n if hasattr(y[0], '__len__'):\n for i in range(len(y[0])):\n plt.plot(ep, [val[i] for val in y], line[i])\n else:\n plt.plot(ep, y, line)\n plt.show()", "def plot_mult_timetrends(data, geoids, cols, area, colors, markers, sharex,\n ylim_bottom = -150, ylim_top = 150, ylabel = 'Pct change in mobility', xlabels=None):\n ax = plt.axes(area, sharex = None)\n \n cols = cols\n plt.hlines(0,data.num_date.min(),data.num_date.max())\n i = 0\n for y in cols:\n pts = y[:12]\n \n# lim = ylim\n# plt.xlabel('date', fontsize=18)\n plt.ylabel(ylabel, fontsize=22)\n\n plt.yticks(fontsize=30) \n\n x_locator = FixedLocator(data.num_date[np.arange(0,data.shape[0],7)].tolist())\n ax.xaxis.set_minor_locator(x_locator)\n plt.grid(axis='x', which = 'both') \n \n plt.plot(data['num_date'], data[y], color = colors[i], linewidth=5)\n i = i+ 1\n plt.xticks(ticks = data.num_date[np.arange(0,data.shape[0],28)].tolist(),\n labels = xlabels, rotation=30, ha='right',\n fontsize=30)\n plt.ylim(ylim_bottom,ylim_top)\n\n return ax", "def 
func_double_yaxis_data_subplot_show(data_plot_cfg_dic_nlst, axes_cfg_dic_lst,\n nrow, ncol, x_label, y1_label, y2_label,\n sub_titles=None, anno_text_lst=None, fig_title_lst=None,\n fig_size=None, subplot_fit_rect=None):\n\n # Create a figure\n fig, axs = plt.subplots(nrow, ncol, figsize=fig_size)\n\n # Plot the double-axis data for each subplot\n for index, ax in enumerate(axs.flat):\n ax1, ax2 = func_double_yaxis_data_plot(ax, data_plot_cfg_dic_nlst[index], axes_cfg_dic_lst,\n x_label, y1_label, y2_label)\n\n # Config the figure\n if index == 0:\n fontsize_label = axes_cfg_dic_lst[0].get('fontsize_label', 14)\n ax1.set_ylabel(y1_label, color='k', fontsize=fontsize_label)\n ax2.label_outer() # It seems label_outer() doesn't work for ax2, so I remove ytick labels manually\n ax2.set_yticklabels([])\n elif index == (ncol - 1):\n fontsize_label = axes_cfg_dic_lst[1].get('fontsize_label', 14)\n ax2.set_ylabel(y2_label, color='k', fontsize=fontsize_label)\n ax1.label_outer()\n\n ax1.get_legend().remove() # Remove individual legend for each subplot\n ax2.get_legend().remove() # Remove individual legend for each subplot\n # ax1.label_outer()\n # ax2.label_outer()\n\n # Define appearance\n func_matlab_style(ax)\n\n if fig_title_lst is not None:\n ax.set_title(fig_title_lst[index], fontweight='bold')\n if sub_titles is not None:\n ax.text(-25, -43, sub_titles[index], fontsize=11, fontweight='bold')\n if anno_text_lst is not None:\n ax.text(axes_cfg_dic_lst[0]['xlim'][0]+10, -5, anno_text_lst[index], fontsize=8)\n # ax.set_aspect('equal')\n\n ax1_handles, ax1_labels = ax1.get_legend_handles_labels()\n ax2_handles, ax2_labels = ax2.get_legend_handles_labels()\n handles = ax1_handles + ax2_handles\n labels = ax1_labels + ax2_labels\n fontsize_legend = axes_cfg_dic_lst[0].get('fontsize_legend', 12)\n fig.legend(handles, labels, ncol=4, loc='lower center', prop={'size': fontsize_legend})\n\n fig.tight_layout(rect=subplot_fit_rect) # otherwise the right y-label is slightly clipped\n plt.show()", "def plot_data_matplotlib(df_data):\n # creating the figure and subplots as two rows and one column\n fig, ax = plt.subplots(2, 1)\n # defining the colours used for the plots and y axes\n red = \"#DA2525\"\n blue = \"#003A78\"\n # setting up the subplots to share the x axis\n # ax02 is the second y axis of the first subplot\n ax02 = ax[0].twinx()\n # ax12 is the second y axis of the second subplot\n ax12 = ax[1].twinx()\n # the global co2 line plot\n line1 = ax[0].plot(\n df_data.index,\n df_data[\"global_co2\"],\n label=\"Global $CO_2$ Emissions\",\n color=blue\n )\n # the global temperature line plot\n line2 = ax02.plot(\n df_data.index,\n df_data[\"global_temp\"],\n label=\"Global Temperature Anomaly\",\n color=red\n )\n # the uk co2 line plot\n line3 = ax[1].plot(\n df_data.index,\n df_data[\"uk_co2\"],\n label=\"UK $CO_2$ Emissions\",\n color=blue\n )\n # the uk temperature line plot\n line4 = ax12.plot(\n df_data.index,\n df_data[\"uk_temp\"],\n label=\"UK Surface Temperature\",\n color=red\n )\n # the next three dataframes are used to indicate where there are gaps in\n # the data, which I will use to produce a shaded region to highlight this\n # fact\n # for the global temperature data\n global_temp_nan = df_data[pd.isna(df_data[\"global_temp\"])]\n # for the UK temperature data\n uk_temp_nan = df_data[pd.isna(df_data[\"uk_temp\"])][:-1]\n # for the UK co2 emissions data\n uk_co2_nan = df_data[pd.isna(df_data[\"uk_co2\"])][:-2]\n # creating a shaded region to show the missing global temperature data\n 
ax[0].axvspan(\n global_temp_nan.index[0],\n global_temp_nan.index[-1],\n alpha=0.1,\n color=\"black\"\n )\n # creating a shaded region to show the missing UK co2 data\n ax[1].axvspan(\n uk_temp_nan.index[0],\n uk_co2_nan.index[-1],\n alpha=0.1,\n color=\"black\"\n )\n # creating a shaded region to show the missing UK temperature data\n ax[1].axvspan(\n uk_co2_nan.index[-1],\n uk_temp_nan.index[-1],\n alpha=0.05,\n color=\"black\"\n )\n # setting titles for the figure and subplots\n ax[0].set_title(\"{}{}{}\".format(\n \"Global and UK \",\n \"$CO_2$ Emissions and Surface Temperature over Time\",\n \"\\n\\nGlobal\"))\n ax[1].set_title(\"UK\")\n # setting axes labels\n ax[1].set_xlabel(\"Time (years)\")\n ax[0].set_ylabel(\"$CO_2$ Emissions (Tg)\", color=blue)\n ax02.set_ylabel(\"Temperature Anomaly (°C)\", color=red)\n ax[1].set_ylabel(\"$CO_2$ Emissions (Tg)\", color=blue)\n ax12.set_ylabel(\"Temperature (°C)\", color=red)\n # setting x axes limits so both subplots are over the same range\n ax[0].set_xlim((df_data.index[0], df_data.index[-1]))\n ax[1].set_xlim((df_data.index[0], df_data.index[-1]))\n # setting the x axes tick values\n ax[0].set_xticks([d for d in df_data.index if d.year % 20 == 0])\n ax[1].set_xticks([d for d in df_data.index if d.year % 20 == 0])\n # setting y axes colours to match the line plots\n ax[0].tick_params(\"y\", colors=blue)\n ax02.tick_params(\"y\", colors=red)\n ax[1].tick_params(\"y\", colors=blue)\n ax12.tick_params(\"y\", colors=red)\n # annotating the shaded regions\n ax[0].annotate(\n \"No temperature data available\",\n (\"1760-01-01\", 4000)\n )\n ax[1].annotate(\n \"No data available\",\n (\"1760-01-01\", 300)\n )\n ax[1].annotate(\n \"No temperature data available\",\n (\"1850-01-01\", 500)\n )\n # setting the legends \n ax[0].legend(\n line1 + line2,\n [\n line1[0].get_label(),\n line2[0].get_label(),\n ],\n loc=2\n )\n ax[1].legend(\n line3 + line4,\n [\n line3[0].get_label(),\n line4[0].get_label()\n ],\n loc=2\n )\n plt.show()\n return", "def plot_subplots(x_list, y_list, z_list):\n # create a line chart with the average rating of the top movies per year\n # min rating = 0 and max = 10\n plot1 = plt.subplot(211)\n plt.plot(x_list, y_list, color = 'lightseagreen')\n plt.axis([START_YEAR, END_YEAR - 1, 0, 10])\n plt.title('Average IMDB Movie Rating per Year', fontsize=12)\n plt.ylabel('Average Rating')\n plt.grid(True)\n\n # make x ticklabels of plot1 invisible\n plt.setp(plot1.get_xticklabels(), visible=False)\n\n # adjust space between subplots\n plt.subplots_adjust(hspace=0.3)\n\n # create a line chart with the average runtime with shared x-axis\n plot2 = plt.subplot(212, sharex=plot1)\n plt.plot(x_list, z_list, color = 'lightseagreen')\n plt.title('Average IMDB Movie Runtime per Year', fontsize=12)\n plt.ylabel('Average Runtime (min)')\n plt.grid(True)\n\n # define axes, with all years (2008 till 2017) on the x-axis\n # min runtime = 0, max runtime = 180\n plt.axis([START_YEAR, END_YEAR - 1, 0, 180])\n plt.xticks(x_list)\n plt.xlabel('Year')\n\n # plot both the subplots\n plt.show()", "def dline_tdepl_array(lines,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, axs = plt.subplots(len(lines), sharex='col',\\\n figsize=(6,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n for i,ax in enumerate(axs):\n\n dline_tdepl(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[1],nGal=p.nGals[1],add=True,cb=True)\n 
dline_tdepl(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add=True,cb=False)\n\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/dlines_tdepl_array_%s%s%s_%s%s_%s.png' % (p.ext,p.grid_ext,p.table_ext,p.sim_name,p.sim_run,p.select), format='png', dpi=300)", "def etio_subplot(df, ax, title, graph_color='skyblue'):\n\n post_dx_histo = histo_dx_includes(df)\n hist_df = pd.DataFrame({\"Dx\": post_dx_histo.index, \"Count\": post_dx_histo.data})\n #hist_df = hist_df.drop(1)\n print(hist_df)\n\n graph_range = range(1,len(hist_df.index)+1)\n ax.hlines(y=graph_range, xmin=0, xmax=hist_df['Count'], color=graph_color)\n ax.plot(hist_df['Count'], graph_range, \"D\", color=graph_color)\n ax.set_yticks(range(1, len(hist_df['Dx'])+1))\n ax.set_yticklabels(hist_df['Dx'], fontsize='10')\n\n ax.set_title(title, fontsize='10')\n return ax", "def seas_line_subplot(rows, cols, df, fwd=None, **kwargs):\n fig = make_subplots(\n cols=cols,\n rows=rows,\n specs=[[{\"type\": \"scatter\"} for x in range(0, cols)] for y in range(0, rows)],\n subplot_titles=kwargs.get(\"subplot_titles\", None),\n )\n\n chartcount = 0\n for row in range(1, rows + 1):\n for col in range(1, cols + 1):\n # print(row, col)\n if chartcount > len(df):\n chartcount += 1\n continue\n\n dfx = df[df.columns[chartcount]]\n fwdx = None\n if fwd is not None and len(fwd) > chartcount:\n fwdx = fwd[fwd.columns[chartcount]]\n\n showlegend = True if chartcount == 0 else False\n\n traces = cptr.seas_plot_traces(\n dfx, fwd=fwdx, showlegend=showlegend, **kwargs\n )\n\n for trace_set in [\"shaded_range\", \"hist\", \"fwd\"]:\n if trace_set in traces:\n for trace in traces[trace_set]:\n fig.add_trace(trace, row=row, col=col)\n\n chartcount += 1\n\n legend = go.layout.Legend(font=dict(size=10))\n fig.update_xaxes(\n tickvals=pd.date_range(start=str(dates.curyear), periods=12, freq=\"MS\"),\n tickformat=\"%b\",\n )\n title = kwargs.get(\"title\", \"\")\n fig.update_layout(\n title=title,\n title_x=0.01,\n xaxis_tickformat=\"%b\",\n legend=legend,\n margin=preset_margins,\n )\n return fig", "def _plot(self, df, head, title, lines, verbose: bool = False):\n fig = go.Figure(layout=set_layout())\n\n if isinstance(lines, str):\n lines = [lines]\n elif not isinstance(lines, list):\n raise ValueError(\"Only string or list is valid type for lines.\")\n\n for n in lines:\n fig.add_trace(self._plot_line(df, head=head, y=n, line_name=n.upper()))\n\n if verbose:\n fig.add_trace(self._plot_stock_data(self._df, head))\n\n fig.update_layout(\n title_text=f\"{title} Chart ({self.stock_code})\",\n xaxis_rangeslider_visible=False,\n )\n fig.show()", "def draw_lists_pyplot(y_array, line_weight=3, learnig_rate=1):\n y = y_array\n plt.plot(y, lw=line_weight, label='cost(a={:})'.format(learnig_rate))\n plt.legend()\n\n plt.title(\"Gradient Descent Optimizing Method\\nminimize cost function\")\n plt.xlabel('time-itoration')\n plt.ylabel('cost-function')\n\n plt.xlim(0,)\n plt.ylim(0,)\n\n plt.grid(b=None, which='major', axis='both')\n plt.show()", "def draw_lists_pyplot(y_array, line_weight=3, learnig_rate=1):\n y = y_array\n plt.plot(y, lw=line_weight, label='cost(a={:})'.format(learnig_rate))\n plt.legend()\n\n plt.title(\"Gradient Descent Optimizing Method\\nminimize cost function\")\n plt.xlabel('time-itoration')\n plt.ylabel('cost-function')\n plt.xlim(0,)\n plt.ylim(0,)\n\n plt.grid(b=None, which='major', axis='both')\n plt.show()", "def 
dyplot(self, x, y, name, dir):\n fig, ax1 = plt.subplots(figsize=(6, 4), dpi=500, facecolor='white')\n ax1.plot(x, '-b*', ms=2, linewidth=1)\n ax1.set_xlabel('Epoch', fontsize=9)\n ax1.set_ylabel('Discriminator Loss per Epoch', fontsize=9, color='b')\n ax1.tick_params('y', colors='b')\n\n ax2 = ax1.twinx()\n ax2.plot( y, '-r*', ms=2, linewidth=1)\n ax2.set_ylabel('Generator Loss per Epoch', fontsize=9, color='r')\n ax2.tick_params('y', colors='r')\n fig.tight_layout()\n plt.savefig('{}/{}.png'.format(dir, 'Loss-Adversarial-' + name))\n plt.close()", "def dline_dSFR_array(lines,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, axs = plt.subplots(len(lines), sharex='col',\\\n figsize=(6,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n for i,ax in enumerate(axs):\n\n dline_dSFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[1],nGal=p.nGals[1],add_obs=p.add_obs,MS=p.MS,add=True,cb=True)\n dline_dSFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=False,add=True,cb=False)\n\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/dlines_dSFR_array_%s%s%s_%s%s_%s.png' % (p.ext,p.grid_ext,p.table_ext,p.sim_name,p.sim_run,p.select), format='png', dpi=300)", "def multiplot(self, x, y, **kwargs):\n\n # --------------------------------------------------------------------------------------------- #\n # Attributes\n self._evalKwargs(kwargs)\n # Remove the previous and create the new framework\n plt.delaxes(self.ax)\n count = 0\n colcount = 0\n # Get the min and max values of the X-axis\n xmin = []\n xmax = []\n for i in range( len(x) - 1):\n if hasattr(x[i][0], \"__len__\"):\n for j in range( len(x[i]) - 1):\n xmin.append( min(x[i][j]) )\n xmax.append( max(x[i][j]) )\n else:\n xmin.append( min(x[i]) )\n xmax.append( max(x[i]) )\n if self.xmin is not None:\n xmin = [self.xmin]\n if self.xmax is not None:\n xmax = [self.xmax]\n deltaX = max(xmax) - min(xmin)\n xmin = min(xmin) - 0.05*deltaX\n xmax = max(xmax) + 0.05*deltaX\n\n # --------------------------------------------------------------------------------------------- #\n # Iterate over the number of subplots \n for nSP in range( len(self.prop) ):\n # --------------------------------------------------------------------------------------------- #\n # Initialize the subplot properties\n self.ax = plt.subplot2grid( (sum(self.prop), 1), (count, 0), rowspan=self.prop[nSP])\n count += self.prop[nSP] # Keep track of the size of the plot\n # Extract the errors if any are given\n if self.yerr is not None:\n yerrSP = self.yerr[nSP]\n if self.xerr is not None:\n xerrSP = self.xerr[nSP] \n # Set the y-axis and x-axis scales\n try:\n ymode = self.ymode[colcount]\n except:\n ymode = self.ymode\n self.ax.set_yscale(ymode)\n self.ax.set_xscale(self.xmode)\n\n # --------------------------------------------------------------------------------------------- #\n # Iterate over the different curves to plot in the same subplot\n if hasattr(y[nSP][0], \"__len__\"):\n for nCurv in range( len(y[nSP]) ):\n # Read the plot properties\n try: color = self.color[colcount]\n except: color = self.color\n try: mksize = self.mksize[colcount]\n except: mksize = self.mksize\n try: alpha = self.alpha[colcount]\n except: alpha = self.alpha\n try: ncol = self.ncol[colcount]\n except: ncol = self.ncol\n try: loc = self.loc[colcount]\n except: loc = self.loc\n try: legend = 
self.label[colcount]\n except: legend = self.label \n try: lstyle = self.lstyle[colcount]\n except: lstyle = self.lstyle\n try: mktype = self.mktype[colcount]\n except : mktype= self.mktype\n\n # Extract the errors if any are given\n if (self.yerr is not None) and (hasattr(self.yerr[nSP][nCurv], \"__len__\")):\n yerrnCurv = self.yerr[nSP][nCurv]\n else:\n yerrnCurv = None\n if (self.xerr is not None) and (hasattr(self.xerr[nSP][nCurv], \"__len__\")):\n xerrnCurv = self.xerr[nSP][nCurv] \n else:\n xerrnCurv = None\n\n # Plot limits as down-arraows\n if (self.limit is not None) and (self.limit[nSP][nCurv]):\n self.ax.errorbar(x[nSP][nCurv], y[nSP][nCurv], xerr=xerrnCurv, \n yerr=[yerrnCurv, np.zeros( len(yerrnCurv) )], fmt='none', \n ecolor=color, elinewidth=0.5, alpha=alpha, capsize=0, \n barsabove=False, lolims=False, uplims=False, xlolims=False, \n xuplims=False, errorevery=1, capthick=None, zorder=nCurv, legend=None)\n self.ax.plot(x[nSP][nCurv], y[nSP][nCurv]-yerrnCurv, marker='v',\n color=color, alpha=alpha, markersize=mksize, linestyle='',\n markeredgecolor=color, zorder=nCurv)\n # Fill an area between y[nSP][0][0] and y[nSP][0][1]\n #elif hasattr(y[nSP][nCurv], \"__len__\"):\n # self.ax.fill_between(x[nSP][nCurv], y[nSP][nCurv][0], y[nSP][nCurv][1], facecolor=self.color, edgecolor='none', alpha=0.5,\n # rasterized=self.raster, zorder=-10)\n # Plot a 'normal' curve\n else:\n if (legend is not None) and (legend != 'None') :\n graph = self.ax.errorbar(x[nSP][nCurv], y[nSP][nCurv], yerr=yerrnCurv, \n xerr=xerrnCurv, fmt=mktype, ecolor=color, elinewidth=0.5, capsize=0,\n linestyle=lstyle, markerfacecolor=color, markeredgecolor=color, \n color=color, markersize=mksize, label=legend, linewidth=self.lwdth, \n barsabove=False, errorevery=1, capthick=None, alpha=alpha, zorder=nCurv)\n # Handling of the labels of the curves\n handles, labels = self.ax.get_legend_handles_labels()\n handle_list, label_list = [], []\n for k in xrange( len(labels) ):\n if labels[k] in self.label:\n handle_list.append(handles[k])\n label_list.append(labels[k])\n self.ax.legend(handle_list, label_list, loc=\"best\", prop={'size':self.ftsize2},\n frameon=True, numpoints=1, ncol=ncol, handletextpad=0.1)\n else:\n graph = self.ax.errorbar(x[nSP][nCurv], y[nSP][nCurv], yerr=yerrnCurv,\n xerr=xerrnCurv, fmt=mktype, ecolor=color, elinewidth=0.5, capsize=0,\n linestyle=lstyle, markerfacecolor=color, markeredgecolor=color, \n color=color, markersize=mksize, alpha=alpha, linewidth=self.lwdth,\n barsabove=False, errorevery=1, capthick=None, zorder=nCurv)\n colcount += 1\n # --------------------------------------------------------------------------------------------- #\n # There is only one curve per subplot\n else:\n # Read the plot properties\n try: color = self.color[colcount]\n except: color = self.color\n try: mksize = self.mksize[colcount]\n except: mksize = self.mksize\n try: alpha = self.alpha[colcount]\n except: alpha = self.alpha\n try: ncol = self.ncol[colcount]\n except: ncol = self.ncol\n try: loc = self.loc[colcount]\n except: loc = self.loc\n try: legend = self.label[colcount]\n except: legend = self.label \n try: lstyle = self.lstyle[colcount]\n except: lstyle = self.lstyle\n try: mktype = self.mktype[colcount]\n except : mktype= self.mktype\n\n # Extract the errors if any are given\n if (self.yerr is not None) and (hasattr(self.yerr[nSP], \"__len__\")):\n yerrSP = self.yerr[nSP]\n else:\n yerrSP = None\n if (self.xerr is not None) and (hasattr(self.xerr[nSP], \"__len__\")):\n xerrSP = self.xerr[nSP] \n else:\n 
xerrSP = None\n # Plot\n if (self.limit is not None) and (self.limit[nSP]):\n self.ax.errorbar(x[nSP], y[nSP], xerr=xerrSP, \n yerr=[yerrSP, np.zeros( len(yerrSP) )], fmt='none', \n ecolor=color, elinewidth=0.5, alpha=alpha, capsize=0, \n barsabove=False, lolims=False, uplims=False, xlolims=False, \n xuplims=False, errorevery=1, capthick=None, legend=None)\n self.ax.plot(x[nSP], y[nSP]-yerrSP, marker='v',\n color=color, alpha=alpha, markersize=mksize, linestyle='',\n markeredgecolor=color)\n else:\n self.ax.errorbar(x[nSP], y[nSP], yerr=yerrSP, xerr=xerrSP, fmt=mktype, ecolor=color,\n elinewidth=0.5, capsize=0, linestyle=lstyle, markerfacecolor=color, \n markeredgecolor=color, markersize=mksize, label=legend, alpha=alpha, color=color,\n barsabove=False, errorevery=1, capthick=None)\n colcount += 1\n if legend is not None:\n # Handling of the labels of the curves\n self.ax.legend(loc=\"best\", prop={'size':self.ftsize2}, frameon=True, numpoints=1,\n ncol=ncol, handletextpad=0.1)\n handles, labels = self.ax.get_legend_handles_labels()\n handle_list, label_list = [], []\n for k in xrange(len(labels)):\n if labels[k] in self.label:\n handle_list.append(handles[k])\n label_list.append(labels[k])\n self.ax.legend(handle_list, label_list, loc=\"best\", prop={'size':self.ftsize2}, \n frameon=True, numpoints=1, ncol=ncol, handletextpad=0.1)\n\n # --------------------------------------------------------------------------------------------- #\n # Make pretty each subplot\n\n # Shift the x-label\n self.ax.yaxis.set_label_coords(self.labelx, 0.5)\n # Set the y-label for each subplot\n self.ax.set_ylabel(self.ylabel[nSP], fontsize=self.ftsize1, multialignment='center')\n self._plotDisplay()\n\n # Dimensions\n self.ax.set_xlim(xmin, xmax) # Every subplot has the same x-axis \n ymin, ymax = self.ax.get_ylim()\n try: ymin = self.ymin[nSP]\n except: pass\n try: ymax = self.ymax[nSP]\n except: pass\n self.ax.set_ylim(ymin, ymax) \n\n # Draw a horizontal line\n if (self.hline is not None) and (self.hline[nSP] is not None):\n # Multiple h-line to draw\n self.ax.axhline(y=self.hline[nSP], color='black', linestyle=':')\n # Fill an area\n if self.fill is not None:\n #self.ax.fill_between(x[nSP][nCurv], y[nSP][nCurv][0], y[nSP][nCurv][1], facecolor=self.color, edgecolor='none', alpha=0.5,\n # rasterized=self.raster, zorder=-10)\n for k in range(len(self.fill)/2):\n self.ax.axvspan(self.fill[k*2], self.fill[k*2+1], facecolor=self.shadecol, \n edgecolor=\"none\", linewidth=0., zorder=-10, alpha=0.5)\n # For all upper subplot, remove the last ticks\n if nSP != len(self.prop)-1:\n plt.setp(self.ax.get_xticklabels(), visible=False)\n self.ax.set_xlabel('')\n ymincheck, ymaxcheck=self.ax.get_ylim()\n if ymaxcheck > ymincheck:\n self.ax.get_yticklabels()[0].set_visible(False)\n else: # in case of a revert y axis...\n self.ax.get_yticklabels()[-1].set_visible(False)\n\n self.f.subplots_adjust(hspace=0)", "def plot_many_y(x, y, yer=None, xlabel = None, ylabel = None, ynames = None, label = None, domain=None,\n yrange = None, undertext =None, savedir = None, marker=None, markerstyles=None, plotspecs = None, groupings=None,\n groupings_labels_within = None, vlines = None, legend_title=None, n_legend_columns=None, text=None, linestyles=None,\n colors=None, save=None):\n if save is None:\n save = True\n if savedir is None:\n save_dir = os.getcwd()\n else:\n save_dir = savedir\n if marker is None:\n marker = False\n if vlines is None:\n vlines = []\n if isinstance(vlines, float):\n vlines = [vlines]\n if n_legend_columns is 
None:\n n_legend_columns = 1\n\n if markerstyles is None:\n my_marker_styles = [st for st in marker_styles]\n else:\n my_marker_styles = [st for st in markerstyles]\n if groupings_labels_within is None:\n groupings_labels_within = False\n\n if linestyles is None:\n my_line_styles = [ls for ls in line_styles]\n else:\n my_line_styles = [ls for ls in linestyles]\n\n\n #in case linestyle -- comes up\n dashes = (10, 25)\n dashes = [20,55]\n dashes = [40, 40]\n dashes = [5, 5]\n dash_width_factor = 2\n dash_width_factor = 1.5\n\n number_y = len(y)\n\n if groupings is None:\n grouped = False\n #print([\"hi\" for _ in range(number_y_num)])\n groupings = [{ii} for ii in range(number_y)]\n else:\n grouped = True\n\n # Make sure all the elements are in a colour grouping\n if grouped:\n extra_group = set()\n for i in range(number_y):\n in_a_group = False\n for seti in groupings:\n for el in seti:\n if i == el:\n if not in_a_group:\n in_a_group = True\n #else:\n #print el, ' in two colour groups'\n if not in_a_group:\n extra_group.add(i)\n\n if len(groupings) == 1:\n if ynames is not None:\n if len(ynames) == number_y:\n grouped = False\n\n\n default_plot_specs = copy.deepcopy(default_plot_specs_all)\n default_plot_specs['legend_font'] = {'size': 8}\n default_plot_specs['legend_anchor'] = 'upper right'\n default_plot_specs['legend_loc'] = (0.98, -0.1)\n\n if marker:\n default_plot_specs['x_scale'] = 0.05\n else:\n default_plot_specs['x_scale'] = 0\n\n text_heights = [-0.023, -0.069, -0.115,-0.161]\n\n if plotspecs is not None:\n for stat in list(default_plot_specs.keys()):\n if stat in plotspecs:\n default_plot_specs[stat] = plotspecs[stat]\n\n the_label = ''\n\n if domain is not None:\n xlow = domain[0]\n xhigh = domain[1]\n for ii in range(number_y):\n klow = x[ii].index(find_nearest(x[ii],xlow))\n khigh = x[ii].index(find_nearest(x[ii], xhigh))\n #khigh = x[ii].index(find_nearest_above(x[ii], xhigh))\n x[ii] = x[ii][klow:khigh]\n y[ii] = y[ii][klow:khigh]\n if yer:\n yer[ii] = yer[ii][klow:khigh]\n if yrange is not None:\n ylow = yrange[0]\n yhigh = yrange[1]\n if xlabel is None:\n x_label = ''\n else:\n x_label = xlabel\n if ylabel is None:\n y_label = ''\n the_label = 'y_' +str(number_y) +'_'\n else:\n y_label = ylabel\n the_label += y_label[:4] +'_'\n if ynames is None:\n y_names = []\n else:\n y_names = ynames\n if label is None:\n the_label = the_label + 'vs_' +x_label\n else:\n the_label = label\n\n under_text = []\n if undertext is not None:\n under_text = undertext[:]\n\n if marker:\n rcParams['legend.numpoints'] = 1\n\n plt.clf()\n\n fig = plt.figure(figsize=default_plot_specs['fsize'], dpi=default_plot_specs['dpi'])\n ax_1 = fig.add_subplot(111)\n\n if default_plot_specs['xlog']:\n ax_1.set_xscale('log')\n if default_plot_specs['ylog']:\n ax_1.set_yscale('log')\n\n if grouped:\n mycolors = cm.rainbow(np.linspace(0, 1, len(groupings)))\n else:\n mycolors = cm.rainbow(np.linspace(0, 1, number_y))\n color_dict = dict()\n line_style_dict = dict()\n marker_style_dict = dict()\n\n\n ynames_dict = dict()\n custom_legend_entries_dict = dict()\n display_leg_numbers = []\n\n add_dummy_ynames = False\n if ynames is not None:\n if len(ynames) == len(groupings):\n if len(groupings) != len(y):\n # if only the first element of each group is named\n add_dummy_ynames = True\n if not groupings_labels_within:\n display_leg_numbers = [kk for kk in range(len(ynames))]\n elif not groupings_labels_within:\n display_leg_numbers = [kk for kk in range(len(ynames))]\n elif not groupings_labels_within:\n 
display_leg_numbers = [kk for kk in range(len(ynames))]\n\n\n for seti, jj in zip(groupings, range(len(groupings))):\n for k,ii in zip(sorted(list(seti)), range(len(seti))):\n #jj is the group number\n #ii is the number within the set\n #k is the number in the ylist\n if colors is None:\n if grouped:\n color_dict[k] = mycolors[jj]\n else:\n color_dict[k] = mycolors[k]\n\n else:\n if grouped:\n color_dict[k] = colors[jj]\n else:\n color_dict[k] = colors[k]\n if grouped:\n marker_style_dict[k] = my_marker_styles[ii]\n line_style_dict[k] = my_line_styles[ii]\n else:\n # print(k)\n # print(markerstyles)\n if markerstyles is None:\n marker_style_dict[k] = default_plot_specs['marker_style']\n else:\n marker_style_dict[k] = markerstyles[k]\n if linestyles is None:\n line_style_dict[k] = default_plot_specs['linestyle']\n else:\n line_style_dict[k] = linestyles[k]\n if add_dummy_ynames:\n if ii == 0: # if the first in the set\n ynames_dict[k] = ynames[jj]\n else:\n ynames_dict[k] = 'dummy'\n\n\n\n if groupings_labels_within:\n\n if ii == 0:\n display_leg_numbers.append(k)\n\n # Create custom artists\n if marker:\n markstyli = marker_style_dict[k]\n style = line_style_dict[k]\n if markstyli and not style:\n capsizi = default_plot_specs['cap_size']\n else:\n capsizi = None\n if line_style_dict[k] == '--':\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k', marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n dashes=dashes)\n else:\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k', marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n linestyle=style,\n linewidth=default_plot_specs['linewidth'])\n else:\n if line_style_dict[k] == '--':\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k', dashes=dashes,\n linewidth=dash_width_factor*default_plot_specs['linewidth'])\n else:\n custom_legend_entries_dict[ii] = plt.Line2D((0, 1), (0, 0), color='k',\n linestyle=style,\n linewidth=default_plot_specs['linewidth'])\n\n if add_dummy_ynames:\n ynames = [ynames_dict[k] for k in range(number_y)]\n # Create custom artists\n\n simArtist = plt.Line2D((0, 1), (0, 0), color='k', marker='o', linestyle='')\n anyArtist = plt.Line2D((0, 1), (0, 0), color='k')\n\n #print color_dict\n\n # print 'printing ynames in funct'\n # print ynames\n #print 'yname dict', ynames_dict\n\n hl = False\n for jj in range(number_y):\n coli = color_dict[jj]\n style = line_style_dict[jj] # '--' #'None'\n thickness = default_plot_specs['linewidth']\n if style == '--':\n thickness = thickness*dash_width_factor\n hl = True\n hl_num = 3.6\n dashi = True\n else:\n dashi = False\n if marker:\n if yer is None:\n markstyli = marker_style_dict[jj]\n if ynames is None or jj>len(ynames)-1 or not ynames[jj]:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, marker=markstyli\n , markersize=default_plot_specs['marker_size'],\n dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, marker=markstyli, linestyle=style\n , markersize=default_plot_specs['marker_size'],\n linewidth=thickness)\n else:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, label=ynames[jj], marker=markstyli\n , markersize=default_plot_specs['marker_size'],\n dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, label=ynames[jj], marker=markstyli,\n linestyle=style, markersize=default_plot_specs['marker_size'],\n linewidth=thickness)\n # else:\n # ax_1.plot(x[jj], y[jj], color=coli,linestyle=style)\n else:\n if ynames is None or jj > len(ynames) - 1:\n if dashi:\n 
ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness,dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness, linestyle=style)\n else:\n if dashi:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness,label=ynames[jj],dashes=dashes)\n else:\n ax_1.plot(x[jj], y[jj], color=coli, linewidth=thickness, linestyle=style,\n label=ynames[jj])\n\n\n\n if yer is not None:\n\n # ax_1.plot(x[jj], yer_datas_high, color=coli,\n # label=y_names[jj] + ' + SE', linestyle='--')\n # ax_1.plot(x[jj], yer_datas_low, color=coli,\n # label=y_names[jj] + ' - SE', linestyle='--')\n if marker:\n markstyli = marker_style_dict[jj]\n if markstyli and not style:\n capsizi = default_plot_specs['cap_size']\n else:\n capsizi = None\n if ynames is None or jj > len(ynames) - 1:\n if dashi:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n linewidth=default_plot_specs['linewidth'],dashes=dashes)\n else:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n linewidth=default_plot_specs['linewidth'],linestyle=style)\n else:\n if dashi:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n label=y_names[jj],\n linewidth=default_plot_specs['linewidth'],dashes=dashes)\n else:\n ax_1.errorbar(x[jj],y[jj], yer[jj], color=coli,marker=markstyli,\n markersize=default_plot_specs['marker_size'],\n capsize=capsizi,\n label=y_names[jj],\n linewidth=default_plot_specs['linewidth'],linestyle=style)\n else:\n yer_datas_high = [y_i + y_er_i for y_i, y_er_i in zip(y[jj], yer[jj])]\n yer_datas_low = [y_i - y_er_i for y_i, y_er_i in zip(y[jj], yer[jj])]\n ax_1.plot(x[jj], yer_datas_high, color=coli, linestyle='--',dashes=dashes)\n ax_1.plot(x[jj], yer_datas_low, color=coli, linestyle='--',dashes=dashes)\n\n if default_plot_specs['yrotation'] is 'vertical':\n if default_plot_specs['ylabelspace'] ==0:\n ax_1.set_ylabel(y_label, **default_plot_specs['axis_font'])\n else:\n labpad = int(default_plot_specs['axis_font']['size'])*default_plot_specs['ylabelspace']\n ax_1.set_ylabel(y_label,labelpad=labpad, **default_plot_specs['axis_font'])\n else:\n labpad =int(default_plot_specs['axis_font']['size'])*3\n #ax_1.set_ylabel(y_label,rotation=plotspecs['yrotation'],labelpad=int(labpad), **default_plot_specs['axis_font'])\n ax_1.set_ylabel(y_label, rotation=default_plot_specs['yrotation'],labelpad=labpad, horizontalalignment = 'center',verticalalignment ='center',\n **default_plot_specs['axis_font'])\n\n\n # Set the tick labels font\n for labeli in (ax_1.get_xticklabels() + ax_1.get_yticklabels()):\n # labeli.set_fontname('Arial')\n labeli.set_fontsize(default_plot_specs['ticksize'])\n\n ax_1.set_xlabel(x_label, **default_plot_specs['axis_font'])\n\n\n xlow, xhigh = min(x[0]), max(x[0])\n for xx in x[1:]:\n mycopy_low = [g for g in copy.deepcopy(xx)]\n mycopy_high = [g for g in copy.deepcopy(xx)]\n mycopy_low.append(xlow)\n mycopy_high.append(xhigh)\n xlow, xhigh = min(mycopy_low), max(mycopy_high)\n # set axes limits\n if domain is None:\n extra = (xhigh-xlow)*default_plot_specs['x_scale']\n xlow -= extra\n xhigh +=extra\n\n\n #Make vertical lines\n for xfloat in vlines:\n if xlow < xfloat < xhigh:\n ax_1.axvline(x=xfloat,color = default_plot_specs['vlinecolor'],linestyle= default_plot_specs['vlinestyle'],linewidth=default_plot_specs['vlineswidth'])\n\n # if not marker:\n # xhigh 
-= 15\n\n if yrange is None:\n if y:\n if y[0]:\n if yer is not None:\n ylow, yhigh = min([yi-yi_er for yi, yi_er in zip(y[0],yer[0])]), max([yi+yi_er for yi, yi_er in zip(y[0],yer[0])])\n else:\n ylow, yhigh = min(y[0]), max(y[0])\n else:\n ylow, yhigh = 0, 0\n else:\n ylow, yhigh = 0, 0\n if yer is not None:\n for yy, yy_er in zip(y[1:],yer[1:]):\n ylow, yhigh = min([ylow] + [yi-yi_er for yi, yi_er in zip(yy,yy_er)]), max([yhigh]+ [yi+yi_er for yi, yi_er in zip(yy,yy_er)])\n else:\n for yy in y[1:]:\n ylow, yhigh = min([ylow] + yy), max([yhigh] + yy)\n extra = (yhigh-ylow)*default_plot_specs['y_scale']\n ylow -= extra\n yhigh +=extra\n\n\n ax_1.set_xlim(xlow, xhigh)\n ax_1.set_ylim(ylow, yhigh)\n\n while under_text:\n texti = under_text.pop(0)\n plt.figtext(0.08, text_heights.pop(0), texti, default_plot_specs['undertext_font'])\n\n if text:\n ax_1.text(default_plot_specs['text_loc'][0], default_plot_specs['text_loc'][1], text,\n verticalalignment='bottom', horizontalalignment='right',\n transform=ax_1.transAxes,\n color=default_plot_specs['text_color'], fontsize=default_plot_specs['text_size'])\n\n #print 'display_leg_numbers', display_leg_numbers\n\n\n if default_plot_specs['xshade']:\n ax_1.axvspan(default_plot_specs['xshade'][0], default_plot_specs['xshade'][1], alpha=0.3, color=default_plot_specs['xshade_color'])\n\n if ynames:\n # print 'the display leg numbers '\n # print display_leg_numbers\n\n handles, labels = ax_1.get_legend_handles_labels()\n handles = [handle for i,handle in enumerate(handles) if i in display_leg_numbers]\n labels = [label for i,label in enumerate(labels) if i in display_leg_numbers]\n if groupings_labels_within:\n mini = min(len(list(custom_legend_entries_dict.keys())),len(groupings_labels_within))\n handles += [custom_legend_entries_dict[k] for k in range(mini)]\n labels += groupings_labels_within[:mini]\n if hl:\n lgd = ax_1.legend(handles, labels, loc=default_plot_specs['legend_anchor'],\n bbox_to_anchor=default_plot_specs['legend_loc'],\n prop=default_plot_specs['legend_font'], ncol=n_legend_columns,handlelength=hl_num)\n else:\n lgd = ax_1.legend(handles, labels, loc=default_plot_specs['legend_anchor'],\n bbox_to_anchor=default_plot_specs['legend_loc'],\n prop=default_plot_specs['legend_font'], ncol=n_legend_columns)\n\n if legend_title:\n lgd.set_title(legend_title,prop=default_plot_specs['legend_font'])\n\n plt.setp(lgd.get_title(), multialignment='center')\n\n # if hl:\n # print 'doing hl 2'\n # ax_1.legend(handlelength=2)\n\n\n if default_plot_specs['nxticks'] > 0:\n #visible_labelsx = [lab for lab in ax_1.get_xticklabels() if lab.get_visible() is True and lab.get_text() != '']\n for lab in ax_1.get_xticklabels():\n lab.set_visible(True)\n visible_labelsx = [lab for lab in ax_1.get_xticklabels() if lab.get_visible() is True]\n visible_labelsx=visible_labelsx[1::default_plot_specs['nxticks']]\n plt.setp(visible_labelsx, visible = False)\n #\n #ax_1.set_xticks(visible_labelsx[1::2])\n #plt.setp(visible_labels[1::2], visible=False)\n #ax_1.locator_params(axis='x', nticks=default_plot_specs['nxticks'])\n #\n if default_plot_specs['nyticks'] > 0:\n # #ax_1.locator_params(axis='y', nticks=default_plot_specs['nyticks'])\n visible_labelsy = [lab for lab in ax_1.get_yticklabels() if lab.get_visible() is True]\n if len(visible_labelsy) > 4:\n visible_labelsy = visible_labelsy[2:-2]\n plt.setp(visible_labelsy, visible=False)\n\n #plt.grid('off')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n save_dir = os.path.join(save_dir,'%s.png' % 
the_label)\n\n if save:\n save_fig(fig, save_dir)\n else:\n return fig, save_dir", "def updatePlot(self):\n self.axes.clear()\n nc = len(self.curvelist)\n xpos = self.curvelist[0].xvinfo.vidx\n for i in range(nc):\n ypos = self.curvelist[i].yvinfo.vidx\n self.axes.plot(self.data[xpos],\n self.data[ypos], self.col[i])\n if self.idata is not None:\n self.axes.plot(self.idata[xpos],\n self.idata[ypos], self.col[i]+'.')\n self.canvas.draw()", "def multiPlot(self,indexSelect=None,varSelect=None,wrapNumber=5,\n compLines=None, save = None, xlim = None,\n forceYAxisZero = True, colourOverride = None,\n style = None, legend = None, varAsAxis = False,\n xAxisLabel = None, yAxisLabel = None, figsize = (12,10),\n legendLoc = 'lower right'):\n if isinstance(compLines,list):\n compVars = [list(i.columns) for i in compLines]\n dfB = [i.copy() for i in compLines]\n for i in range(len(compLines)):\n if \"Time\" not in compVars[i]:\n dfB[i][\"Time\"]=dfB[i].index\n else:\n compVars[i].remove(\"Time\")\n dfB[i] = pd.melt(dfB[i], id_vars=[\"Time\"],\n value_vars=compVars[i])\n elif compLines is not None:\n compVars=list(compLines.columns)\n dfB = compLines.copy()\n if \"Time\" not in compVars:\n dfB[\"Time\"]=dfB.index\n else:\n compVars.remove(\"Time\")\n dfB = pd.melt(dfB,id_vars=[\"Time\"],\n value_vars=compVars)\n if varSelect is None:\n varSelect=list(self.longData['variable'].unique())\n if indexSelect is None:\n indexSelect=list(self.longData['index'].unique())\n if not isinstance(indexSelect,list):\n indexSelect = [indexSelect]\n if len(varSelect)<wrapNumber:\n #cols = math.floor(math.sqrt(len(varSelect)))\n cols = math.ceil(math.sqrt(len(varSelect)))\n else:\n cols = wrapNumber\n rows = math.ceil(len(varSelect)/cols)\n if style is None:\n myStyle = \"darkgrid\"\n else:\n myStyle = style\n with sns.axes_style(style):\n fig, axs = plt.subplots(rows, cols, sharex=True,\n figsize=figsize)\n if (rows>1):\n axs = trim_axs(axs, len(varSelect))\n elif (cols==1):\n axs = [axs]\n if colourOverride is not None:\n myColorMap = plt.get_cmap(name=\"cool\")\n else:\n myColorMap = plt.get_cmap(name=\"hsv\",\n lut=len(indexSelect)+1)\n for ax, theVar, j in zip(axs, varSelect, range(len(varSelect))):\n if varAsAxis:\n if isinstance(yAxisLabel,list):\n ax.set_ylabel(theVar+\" \"+yAxisLabel[j])\n elif yAxisLabel is not None:\n ax.set_ylabel(theVar+\" \"+yAxisLabel)\n else:\n ax.set_ylabel(theVar)\n else:\n ax.set_title(theVar)\n if isinstance(yAxisLabel,list):\n ax.set_ylabel(yAxisLabel[j])\n elif yAxisLabel is not None:\n ax.set_ylabel(yAxisLabel)\n if xAxisLabel is not None:\n ax.set_xlabel(xAxisLabel)\n df = self.longData\n df = df[df['variable']==theVar]\n if indexSelect is not None:\n for theIndex, i in zip(indexSelect,\n range(len(indexSelect))):\n df2 = df[df['index']==theIndex]\n if colourOverride is not None:\n ax.plot(df2[\"Time\"], df2[\"value\"],\n linestyle='solid',\n color=myColorMap(colourOverride[i]))\n else:\n ax.plot(df2[\"Time\"], df2[\"value\"],\n linestyle='solid',\n color=myColorMap(i))\n if isinstance(compLines,list):\n for i, theIndex in enumerate(indexSelect):\n dfB2 = dfB[theIndex][\n dfB[theIndex]['variable']==theVar]\n if colourOverride is not None:\n ax.plot(dfB2[\"Time\"], dfB2[\"value\"],\"o\",\n color=myColorMap(colourOverride[i]))\n else:\n ax.plot(dfB2[\"Time\"], dfB2[\"value\"],\"o\",\n color=myColorMap(i))\n elif compLines is not None:\n dfB2 = dfB[dfB['variable']==theVar]\n ax.plot(dfB2[\"Time\"], dfB2[\"value\"],\"ko\")\n if xlim is not None:\n ax.set_xlim(xlim)\n if 
forceYAxisZero:\n ax.set_ylim([0, None])\n if legend is not None:\n if colourOverride is not None:\n custom_lines = [Line2D([0], [0], color=myColorMap(\n colourOverride[i]), lw=4)\n for i in range(len(indexSelect))]\n else:\n custom_lines = [Line2D([0], [0], color=myColorMap(i),\n lw=4)\n for i in range(len(indexSelect))]\n if ((not isinstance(compLines,list)) and\n (compLines is not None)):\n custom_lines.append(Line2D([0], [0], \n color=\"k\", lw=4))\n fig.legend(custom_lines, legend,\n loc = legendLoc)\n fig.tight_layout()\n if save is not None:\n fig.savefig(save)", "def graphplot(self):\n if self.binned:\n self.line.set_ydata(self.fft_bins_y)\n else:\n self.line.set_ydata(self.spec_y)\n self.line2.set_ydata(self.wave_y)\n self.ax1.draw_artist(self.ax1.patch)\n self.ax2.draw_artist(self.ax2.patch)\n self.ax1.draw_artist(self.line)\n self.ax2.draw_artist(self.line2)\n self.fig.canvas.update()\n self.fig.canvas.flush_events()", "def updatePlot(self):\n if len(self.baslin):\n X = list(t[0] for t in self.baslin)\n Y = list(t[1] for t in self.baslin)\n self.BLplt.set_xdata(X)\n self.BLplt.set_ydata(Y)\n if self.BLtyp == 'S':\n if self.BL is None:\n self.BL, = self.axes.plot(self.data[0], self.data[2], linestyle='-', color='green')\n else:\n self.BL.set_ydata(self.data[2])\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def update_lines(num, ax, fargs):\n \n dataLines, lines = fargs\n for line, data in zip(lines, dataLines):\n # NOTE: there is no .set_data() for 3 dim data...\n line.set_data(data[0:2, :num])\n line.set_3d_properties(data[2, :num])\n return lines", "def update(self):\n self.line.set_ydata(self._get_y_data())\n self.figure.canvas.draw()", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', 
color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def qline(xlist,ylist):\n dislin.qplot(xlist,ylist,len(xlist))", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def _set_axes(self):\n self += helper.line(stroke=\"black\", x1=self.__dict__['x'], x2=self.__dict__['x'], y1=0, y2=self.__dict__['y']*2)\n self += helper.line(stroke=\"black\", x1=0, x2=self.__dict__['x']*2, y1=self.__dict__['y'], y2=self.__dict__['y'])", "def plot_trajectories_2d(trajectories: pd.DataFrame, ax: Optional[plt.Axes] = None):\n if ax is None:\n fig, ax = plt.subplots()\n\n for cell in trajectories:\n ax.plot(cell[\"position_x\"].values, cell[\"position_y\"].values)\n\n ax.scatter(\n cell[\"position_x\"].values[-1], cell[\"position_y\"].values[-1], marker=\"o\"\n )\n\n return ax", "def initPlotY(self):\n\n self.plotFineY = [np.array([]) for i in range(len(self.plotFineX))]", "def line_plot(self, x, y, labels, ax=None):\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = None\n ax.plot(x, y, '--o', label=labels[0])\n ax.set_xlabel(labels[1])\n ax.set_ylabel(labels[2])\n ax.set_title(labels[3])\n return fig, ax", "def draw_lines(asr,ax):\n r = asr.value\n y = 475.\n x = (r**2-y**2)**(.5)\n xs = np.linspace(-x,x,10)\n yt = np.zeros(xs.size)+y\n yb = np.zeros(xs.size)-y\n ax.plot(xs,yt,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n ax.plot(xs,yb,'-.',color='red',alpha=1.,linewidth=2,zorder=5000)\n return ax", "def update(self, xdata=[], ydata=[]):\n for i in range(4):\n self.lines[i].set_data(xdata[i], ydata[i])\n\n self.blit()", "def plot(self,ax,**kwargs):\n # filter arguments for field lines\n plot_kwargs = { k: kwargs[k] for k in self.__line_args if kwargs.has_key(k)} \n # apply custom plot style set manually with set_plotstyle()\n plot_kwargs.update(self._plot_style)\n # plot lines\n for i,field in enumerate(self.data):\n self.lines[i], = ax.plot(field.x, field.f,'k',**plot_kwargs)\n tdc_Data_vs_X_Plotter.plot(self,ax)", "def plot_data(self):", "def init_plot(self, num_axes):\r\n self.i = []\r\n self.val = []\r\n plt.ion()\r\n self.axes = plt.gca()\r\n self.lines =[]\r\n\r\n for i in range(num_axes):\r\n self.val.append([])\r\n self.lines.append([])\r\n self.lines[i], = self.axes.plot([], self.val[0], '-', c=[random.random() for _ in range(3)], linewidth=1.5, markersize=4)", "def multi_plot(x, y, y_legend=[] ,title=\"Title\", xlab=\"x-axis\", ylab=\"y-axis\"):\n\n if y_legend==[]:\n for i in range(0, np.size(y,0)):\n plt.plot(x, y[i][:], linewidth=2)\n else:\n for i in range(0, np.size(y,0)):\n plt.plot(x, y[i][:], label=y_legend[i], linewidth=2)\n plt.legend(prop={'size': 12}) #legend details\n\n plt.title(title)\n plt.xlabel(xlab)\n plt.ylabel(ylab)", "def plot_graph(self, dataset):\n data = self.data\n diagrams = []\n\n for time_stamp, data_tag in dataset:\n data_x, data_y = [], []\n for item in data:\n data_x.append(item[time_stamp])\n data_y.append(item[data_tag])\n diagrams.append(Scatter(x=data_x, y=data_y, mode='markers'))\n\n layout = 
plotly.graph_objs.Layout(yaxis=dict(autorange='reversed'))\n data = Data(diagrams)\n fig = plotly.graph_objs.Figure(data=data, layout=layout)\n plotly.plotly.plot(fig, filename='exo-line')", "def plot_delta(fname, temp, delta_ts_list, delta_rxn_list, labels, var='G'):\n max_y_lines = 5\n x_axis = np.array([0, 1, 3, 4, 6, 7])\n y_axis = []\n y_labels = []\n # y_min = np.floor(np.array(g_rxn_list).min())\n # y_max = np.ceil(np.array(g_ts_list).max())\n for index in range(max_y_lines):\n try:\n y_axis.append(np.array([0.0, 0.0, delta_ts_list[index], delta_ts_list[index],\n delta_rxn_list[index], delta_rxn_list[index]]))\n except IndexError:\n y_axis.append(None)\n try:\n y_labels.append(labels[index])\n except IndexError:\n y_labels.append(None)\n\n make_fig(fname, x_axis, y_axis[0],\n x_label='reaction coordinate', y_label='\\u0394' + var + ' at {} K (kcal/mol)'.format(temp),\n y1_label=y_labels[0], y2_label=y_labels[1], y3_label=y_labels[2], y4_label=y_labels[3],\n y5_label=y_labels[4], y2_array=y_axis[1], y3_array=y_axis[2], y4_array=y_axis[3], y5_array=y_axis[4],\n ls2='-', ls3='-', ls4='-', ls5='-',\n # y_lima=y_min, y_limb=y_max,\n hide_x=True,\n )", "def plot_mult_locations(sf, df, data, dcounts, geoid, all_geoids, l, b, w_map = 2.5, w_time = 3, h=3, \n colors = ['orange','palevioletred','steelblue','olive'], \n markers = ['o','^','s','P']):\n #plot timeseries\n ax = None\n ax = plot_mult_timetrends(data, geoid, cols = [i for i in data.columns if (i.endswith('21day_avg') and\n i[:12] in geoid)],\n area = [l + w_map + 0.3,b + h/2, w_time, h/2], colors = colors,\n markers = markers, sharex = ax, ylim_bottom = -50, ylim_top = 50,\n xlabels=[''] * 6)\n \n # plot dcount timeseries\n ax = None\n ax = plot_mult_timetrends(dcounts, geoid, cols = [i for i in data.columns if (i.endswith('21day_avg') and\n i[:12] in geoid)],\n area = [l + w_map + 0.3,b,w_time,h/2], colors = colors, markers = markers, sharex = ax,\n ylim_bottom = 0, ylim_top = 200, ylabel = 'Device count',\n xlabels=data.index[np.arange(0,data.shape[0],28)].tolist())\n \n #plot map\n plt.axes([l,b,w_map,h])\n for i in df_edges[df_edges.ZIPR.isin(['98105','98195','98115','98102','98112'])].index:\n shape_ex = sf_edges.shape(i)\n x_lon = np.zeros((len(shape_ex.points),1))\n y_lat = np.zeros((len(shape_ex.points),1))\n for ip in range(len(shape_ex.points)):\n x_lon[ip] = shape_ex.points[ip][0]\n y_lat[ip] = shape_ex.points[ip][1]\n plt.plot(x_lon,y_lat, color = 'black')\n \n outline_geoids(sf = sf, df = df, geoids = all_geoids, include_labels=False)\n fill_blockgroups(sf = sf, df = df, geoids = geoid, colors=colors)\n \n \n plt.xlim(-122.325,-122.25)\n plt.ylim(47.645,47.68)\n plt.axis('off')", "def plot(self, data_frame):\n self.axes.plot(data_frame, 'o-')\n self.axes.set_ylim(0.0, 200.0)\n self.fig.autofmt_xdate()\n self.draw()", "def setup_axes(self):\n fig = plt.figure(1)\n axs = fig.add_subplot(1, 1, 1)\n fig.clf()\n axs = plt.subplots(1, 2)\n ax1 : plt.axis = axs[0]\n ax2 : plt.axis = axs[1]\n fig.canvas.draw()\n \n line1_t, = ax1.plot([], label='train')\n line1_v, = ax1.plot([], label='val')\n\n ax1.set_title('Loss vs Iterations')\n ax1.set_xlabel('Iterations')\n ax1.set_ylabel('Loss')\n ax1.grid(True)\n ax1.autoscale()\n # ax1.legend()\n\n line2_t, = ax2.plot([], label='train')\n line2_v, = ax2.plot([], label='val')\n\n ax2.set_title('Accuracy vs Iterations')\n ax2.set_xlabel('Time')\n ax2.set_ylabel('Percent Accuracy')\n ax2.grid(True)\n ax2.autoscale()\n # ax2.legend()\n\n lines = [line1_t, line1_v, line2_t, line2_v]\n\n 
return fig, ax1, ax2, lines", "def plot_line_graph(target_offenses, counts, year_list, filename):\n\t#this is to demonstrate line graphs but the data is categorical so you should actually be using bar graphs\n\tfig, ax = plt.subplots()\n\tcolors = [\"blue\",\"red\",\"orange\",\"green\",\"yellow\",\"purple\"]\n\tfor index,offense in enumerate(target_offenses):\n\t\tplt.plot(year_list, counts[index], color=colors[index], marker= 'o', label=offense)\n\tax.get_xaxis().get_major_formatter().set_useOffset(False)\t\n\tplt.xlabel('Year')\n\tplt.ylabel('Number of offenses')\n\tplt.legend()\n\tplt.savefig(filename,format=\"png\")\n\tplt.show()", "def draw(values):\n\n # Turn on grid with dashed style\n subplot.yaxis.grid(True, linestyle=\"dashed\")\n\n # Get list of new higher values\n new_values = get_new_values(values)\n\n # Plot 2 lines\n subplot.plot(range(len(values)), values)\n subplot.plot(range(len(new_values)), new_values, linewidth=2)\n\n # Print left plot title\n pyplot.title(\n \"Press X to exit\\nPress S to save\",\n loc=\"left\",\n fontsize=14,\n color=\"#1F76B4\",\n style=\"italic\",\n pad=20,\n )\n\n # Print right plot title\n pyplot.title(\n f\"{'Max objective:':>25}{max(values):>10.2E}\\n\"\n f\"{'Generation:':>25}{values.index(max(values)):>10}\",\n loc=\"right\",\n fontfamily=\"Lucida Sans Typewriter\",\n fontsize=12,\n color=\"#FF7E0E\",\n pad=20,\n )", "def line_SFR_array(lines,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, axs = plt.subplots(len(lines), sharex='col',\\\n figsize=(6,15),facecolor='w',\\\n gridspec_kw={'hspace': 0, 'wspace': 0})\n\n for i,ax in enumerate(axs):\n\n #line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=p.add_obs,MS=p.MS,add=True,cb=True)\n line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[1],nGal=p.nGals[1],add_obs=p.add_obs,MS=p.MS,add=True,cb=True)\n #line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=False,add=True,cb=False)\n\n # Only 1 galaxy\n #line_SFR(line=lines[i],ax=ax,select=p.select,sim_run=p.sim_runs[0],nGal=p.nGals[0],add_obs=True,add=True,cb=False)\n\n\n plt.tight_layout()\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/lines_SFR_array_%s%s%s_%s%s_%s.png' % (p.ext,p.grid_ext,p.table_ext,p.sim_name,p.sim_run,p.select), format='png', dpi=300)", "def _timeseries_scatter_plot_lines(axes):\n axes.axvline(\n x=0,\n ymin=-1000,\n ymax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )\n axes.axhline(\n y=0,\n xmin=-1000,\n xmax=1000,\n color=\"grey\",\n linestyle=\"dotted\",\n alpha=0.6,\n )", "def plt_spec_lines():\n\n for i in range(0, Molecule.species_count):\n mid_line = (Molecule.right_endpt[i] + Molecule.left_endpt[i]) / 2\n shift1 = Molecule.energy[i] - PlotParameter.energy_vshift\n shift2 = Molecule.energy[i] + PlotParameter.name_vshift\n\n en = '{0:5.2f}'.format(Molecule.energy[i])\n\n plt.plot([Molecule.left_endpt[i], Molecule.right_endpt[i]], [Molecule.energy[i], Molecule.energy[i]],\n color=PlotParameter.species_line_color, lw=PlotParameter.species_line_width, linestyle='-')\n plt.text(mid_line, shift1, en, weight='bold', horizontalalignment='center',\n fontsize=PlotParameter.energy_font_size, color='black')\n plt.text(mid_line, shift2, Molecule.name[i], weight='bold', horizontalalignment='center',\n fontsize=PlotParameter.name_font_size, color='black')", "def make_lineplot(df, 
features, dataset, start_date, end_date):\n fig, ax = plt.subplots(len(features), 1, sharex=True)\n colors = ['b', 'r', 'g', 'c', 'm', 'y', 'k']\n for ind, feature in enumerate(features):\n ax[ind].plot(df.loc[start_date:end_date][feature], colors[ind])\n ax[ind].xaxis.set_major_locator(mdates.DayLocator([5, 10, 15, 20, 25, 30]))\n ax[ind].xaxis.set_major_formatter(mdates.DateFormatter('%d/%m'))\n ax[ind].set_ylabel(feature)\n ax[ind].grid()\n plt.xticks(rotation=45)\n plt.xlabel(\"time\")\n if len(features) > 1:\n plt.savefig('plots/%s/lineplot_%s.png' % (dataset, '_'.join(features)), bbox_inches='tight')\n else:\n plt.savefig('plots/%s/lineplot_%s.png' % (dataset, features[0]), bbox_inches='tight')", "def plotstocksdata(datadict,formats):\n\t#plot data\n\tf = plt.figure()\n\tax1 = plt.subplot(111)\n\tdata = datadict[\"yahoo\"]\n\tyahoo = ax1.plot(data['date'],data['val'],formats[\"yahoo\"], label = 'Yahoo Stock Value',linewidth = 1.5)\n\tdata = datadict[\"google\"]\n\tgoogle = ax1.plot(data['date'],data['val'],formats[\"google\"], label = 'Google Stock Value',linewidth = 1.5)\n\tax2 = ax1.twinx()\n\tdata = datadict[\"nytmp\"]\n\tnytmp = ax2.plot(data['date'],data['val'],formats[\"nytmp\"],label = 'NY Mon. High Temp',linewidth=1.5)\n\tax1.set_xlabel('Date (MJD)')\n\tax1.set_ylabel('Value (Dollars')\n\tax1.set_ylim((-20,765))\n\tax1.yaxis.set_minor_locator(plt.MultipleLocator(20))\n\tax1.set_xlim((48800, 55600))\n\tax1.xaxis.set_minor_locator(plt.MultipleLocator(200))\n\t#plt.show() #ISAAC EDIT\n\tax2.set_ylim((-150, 100))\n\tax2.set_ylim((-150, 100))\n\tax2.set_ylabel('Temperature ($^\\circ$F)')\n\tax2.yaxis.set_minor_locator(plt.MultipleLocator(10))\n\tplt.title('New York Temperature, Google, and Yahoo!', fontname = 'serif',fontsize = 18)\n\tplts = yahoo+google+nytmp\n\tlabels = [l.get_label() for l in plts]\n\tax1.legend(plts, labels, loc=(0.025,0.5) ,frameon=False, prop={'size':11}, markerscale = 2)\n\tplt.show()", "def plot_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n df_i = df_i.sort_values(\"timestamp_end\")\n x, y = df_i.timestamp_end.to_numpy(), df_i.objective.cummin().to_numpy()\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n # plt.fill_between(T,\n # y_mean-1.96*y_std,\n # y_mean+1.96*y_std,\n # facecolor=exp_config[\"data\"][exp_name][\"color\"],\n # alpha=0.3)\n else:\n exp_df = exp_df.sort_values(\"timestamp_end\")\n x, y = exp_df.timestamp_end.to_numpy(), exp_df.objective.cummax().to_numpy()\n if \"hartmann6D\" in exp_name:\n y = y + 3.32237 # hartmann6D\n\n plt.plot(\n x,\n y,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n 
marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n if MODE == \"min\":\n plt.legend(loc=\"upper right\")\n else:\n plt.legend(loc=\"lower right\")\n\n plt.ylabel(exp_config.get(\"ylabel\", \"Objective\"))\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid()\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()", "def distance_plot(self, classified_lines):\n for regions, labels in classified_lines:\n for region, label in zip(regions, labels):\n start = self.attacker_position(region[0])\n end = self.attacker_position(region[1])\n plt.plot([start[0], end[0]], [start[1], end[1]],\n color=self.color(int(label)))", "def fdd_plot(result_file_list,default_fdds,ion_default):\n # Getting data from the file\n fdd_param_keys = [result_file.split('_')[0] for result_file in result_file_list]\n# print \" Parameters to plot for:\", fdd_param_keys\n fig = plt.figure(figsize=(14,12))#(8.27,11.69))\n axs = [None]*(len(result_file_list)*5 + 1) # List of axes: 2 ions x (1 uncert + 1 value) + 1 for iterations (axs[0] will not be used)\n for j,result_file_name in enumerate(result_file_list):\n with open(result_file_name,'r') as res_file:\n res_data = res_file.read().split('\\n')\n if res_data[-1] == \"\": # delete empty line if present\n del res_data[-1]\n else:\n pass\n fdd_list, H_list, Hunc_list, D_list, Dunc_list, n_iter_list,fdd_iter_list = [[] for i in range(7)]\n for line in res_data:\n line_split = line.split()\n if line_split[1] == '0' or line_split[1] == '-1':\n fdd_list.append(float(line_split[0].split('_')[-1])) # fdd_list\n fdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n if line_split[1] == '0':\n n_iter_list.append(int(line_split[2])) # number of iterations list\n else:\n n_iter_list.append(-1) # 125, number of iterations\n H_list.append(float(line_split[3])) # H_list\n Hunc_list.append(float(line_split[4])) # Hunc_list\n D_list.append(float(line_split[5])) # D_list\n Dunc_list.append(float(line_split[6])) # Dunc_list\n elif line_split[1] == '1':\n print \" {} = {} <- excluded: error '1' (no f26 file was found, check 'stdout.dat' and 'stderr.dat')\".format(fdd_param_keys[j], line_split[0])\n fdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n n_iter_list.append(0) # number of iterations list\n elif line_split[1] == '2':\n print \" {} = {} <- excluded: error '2' (error happened during VPFITing, check 'stderr.dat')\".format(fdd_param_keys[j], line_split[0])\n\t\tfdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n\t\tn_iter_list.append(0) # number of iterations list\n elif line_split[1] == '3':\n print \" {} = {} <- excluded: error '3' (zero size f26 file, check 'stdout.dat')\".format(fdd_param_keys[j], line_split[0])\n\t\tfdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for 
iteration subplot\n\t\tn_iter_list.append(0) # number of iterations list\n elif line_split[1] == '4':\n print \" {} = {} <- excluded: error '4' (new and original f26's have different number of lines, check 'stdout.dat')\".format(fdd_param_keys[j], line_split[0])\n fdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n\t\tn_iter_list.append(-3) # number of iterations list\n elif line_split[1] == '5':\n print \" {} = {} <- excluded: error '5' ('****' uncertainty was found for one of specified ion! Check f26 file)\".format(fdd_param_keys[j], line_split[0])\n fdd_iter_list.append(float(line_split[0].split('_')[-1])) # fdd_list for iteration subplot\n\t\tn_iter_list.append(-2) # number of iterations list\n else:\n print \" Unknown error code was specified in {}\".format(result_file_name)\n\n # Data are ready to plot\n # First column of subplots (uncertainties)\n axs[5*j+1] = plt.subplot(len(fdd_param_keys),3,3*j+1)\n axs[5*j+1].plot(fdd_list,Dunc_list,color='b',drawstyle=\"steps-mid\") # Plotting data\n axs[5*j+1].plot(default_fdds[fdd_param_keys[j]],ion_default[3],'bo',mew=0.0) # plotting default result\n axs[5*j+1].set_ylabel(r'$\\Delta$N(D I)',color='b')\n axs[5*j+1].tick_params('y',colors='b')\n axs[5*j+1].ticklabel_format(useOffset=False)\n axs[5*j+1].set_xscale('log')\n if fdd_param_keys[j] == 'fdbstep':\n axs[5*j+1].set_xlabel('{} [{:g}], km/s'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n else:\n axs[5*j+1].set_xlabel('{} [{:g}]'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n axs[5*j+1].axvline(default_fdds[fdd_param_keys[j]],linewidth=1,color='g') # indicate fdd from vp_setup.dat\n# axs[5*j+1].margins(y=np.ptp(Dunc_list)/2.0) # Include marging above and below the data to show default fdds\n axs[5*j+2] = axs[5*j+1].twinx() # second plot on the same axis\n axs[5*j+2].plot(fdd_list,Hunc_list,color='r',drawstyle=\"steps-mid\")\n axs[5*j+2].plot(default_fdds[fdd_param_keys[j]],ion_default[1],'ro',mew=0.0)\n axs[5*j+2].set_ylabel(r'$\\Delta$N(H I)',color='r')\n axs[5*j+2].tick_params('y',colors='r')\n axs[5*j+2].ticklabel_format(useOffset=False,axis='y')\n # Second column of subplots (middle values)\n axs[5*j+3] = plt.subplot(len(fdd_param_keys),3,3*j+2)\n plot_interal = False # Add 1-sigma interval\n if plot_interal == True:\n median = np.median(D_list)\n med_unc = np.median(Dunc_list)\n print \"median = {} +/- {} for {}\".format(median,med_unc,fdd_param_keys[j])\n axs[5*j+3].fill_between(fdd_list, median-med_unc, median+med_unc,facecolor='green', alpha=0.4)\n axs[5*j+3].plot(fdd_list,D_list,color='b',drawstyle=\"steps-mid\",zorder=2) # Plotting data\n axs[5*j+3].plot(default_fdds[fdd_param_keys[j]],ion_default[2],'bo',zorder=10,mew=0.0)\n axs[5*j+3].set_ylabel(r'N(D I)',color='b')\n axs[5*j+3].ticklabel_format(useOffset=False)\n axs[5*j+3].tick_params('y',colors='b')\n axs[5*j+3].set_xscale('log')\n if fdd_param_keys[j] == 'fdbstep':\n axs[5*j+3].set_xlabel('{} [{:g}], km/s'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n else:\n axs[5*j+3].set_xlabel('{} [{:g}]'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n axs[5*j+3].axvline(default_fdds[fdd_param_keys[j]],linewidth=1,color='g') # indicate fdd from vp_setup.dat\n# axs[5*j+3].margins(y=np.ptp(D_list),tight=False) # Include marging above and below the data to show default fdds\n axs[5*j+4] = axs[5*j+3].twinx()\n axs[5*j+4].plot(fdd_list,H_list,color='r',drawstyle=\"steps-mid\",zorder=1)\n 
axs[5*j+4].plot(default_fdds[fdd_param_keys[j]],ion_default[0],'ro',zorder=10,mew=0.0)\n axs[5*j+4].set_ylabel(r'N(H I)',color='r')\n axs[5*j+4].tick_params('y',colors='r')\n axs[5*j+4].ticklabel_format(useOffset=False,axis='y')\n # Third column of subplots (number of iterations)\n axs[5*j+5] = plt.subplot(len(fdd_param_keys),3,3*j+3)\n axs[5*j+5].plot(fdd_iter_list,n_iter_list,drawstyle=\"steps-mid\") # Plotting n_iter_list\n# axs[5*j+5].yaxis.tick_right()\n# axs[5*j+5].yaxis.set_label_position(\"right\")\n axs[5*j+5].set_ylabel('N iterations')\n axs[5*j+5].ticklabel_format(useOffset=False)\n axs[5*j+5].set_xscale('log')\n if fdd_param_keys[j] == 'fdbstep':\n axs[5*j+5].set_xlabel('{} [{:g}], km/s'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n else:\n axs[5*j+5].set_xlabel('{} [{:g}]'.format(fdd_param_keys[j],default_fdds[fdd_param_keys[j]]))\n axs[5*j+5].axvline(default_fdds[fdd_param_keys[j]],linewidth=1,color='g') # indicate fdd from vp_setup.dat\n axs[5*j+5].axhline(0,linewidth=1,color='grey',ls=\":\") # indicate fdd from vp_setup.dat\n\n# plt.tight_layout()\n fig.subplots_adjust(hspace=0.3,wspace=0.9) # Set up spaces between subplots\n plt.savefig(\"fdd_plot.pdf\",bbox_inches='tight', pad_inches=0)\n plt.show()\n plt.close()\n print \" Plot is done!\"", "def plot(self, x_list, y_list, values, highlight_dict, figsize=(12, 8), lw=2,\n secondary_alpha=1, x_label=None, y_label=None, xlim=None, ylim=None,\n ax=None, upside_down=False, **kwargs):\n if ax is None:\n # create subplot\n fig, ax = plt.subplots(figsize=figsize, facecolor=self.background_color)\n ax.set_facecolor(self.background_color)\n\n return_figax = True\n else:\n return_figax = False\n\n # length of values dict\n len_y = len(y_list)\n\n # iterate thorugh the dictionary and plot the chart\n for key, value in values.items():\n\n # find value in highlight_dict\n if highlight_dict.get(key):\n line_color = highlight_dict[key] # fetch the required color\n color = line_color\n zorder = 3\n alpha = 1\n marker = self.scatter_primary\n else:\n color = self.scatter_color\n line_color = self.line_color\n zorder = 2\n alpha = secondary_alpha\n marker = self.scatter_points\n\n # to plot upside down bumpy chart\n if upside_down:\n if len_y % 2 == 0:\n add_value = 0\n else:\n add_value = 1\n\n # y-coordinate to plot scatter points\n y = np.array(value) + add_value\n\n # coordinates for bezier curve\n verts = [(i + d, vij + add_value) for i, vij in enumerate(value)\n for d in (-self.curviness, 0, self.curviness)][1: -1]\n\n else:\n if len_y % 2 == 0:\n add_value = 1\n else:\n add_value = 0\n\n # y-coordinate to plot scatter points\n y = len_y - np.array(value) + add_value\n\n # coordinates for bezier curve\n verts = [(i + d, len_y - vij + add_value) for i, vij in enumerate(value)\n for d in (-self.curviness, 0, self.curviness)][1: -1]\n\n # plot scatter-points\n if self.scatter != \"value\":\n ax.scatter(\n np.arange(len(value)), y,\n marker=marker,\n color=color,\n s=self.scatter_size,\n alpha=alpha,\n zorder=zorder\n )\n elif self.scatter == \"value\" and highlight_dict.get(key):\n ax.scatter(\n np.arange(len(value)), y,\n marker=marker,\n color=color,\n s=self.scatter_size,\n zorder=zorder\n )\n\n # create bezier curves\n codes = [Path.MOVETO] + [Path.CURVE4] * (len(verts) - 1)\n path = Path(verts, codes)\n patch = patches.PathPatch(path, facecolor='none', lw=lw, edgecolor=line_color,\n zorder=zorder, alpha=alpha)\n ax.add_patch(patch)\n\n # plot labels\n if self.plot_labels:\n if upside_down:\n y_list = y_list[::-1]\n 
self.__add_labels(\n x_list, y_list, ax=ax,\n x_label=x_label, y_label=y_label,\n **kwargs\n )\n\n # xlim and ylim\n if xlim is not None:\n ax.set(xlim=xlim)\n elif ylim is not None:\n ax.set(ylim=ylim)\n\n if return_figax:\n return fig, ax\n return None", "def run_animation(self):\n\n def _get_frame(frame_index, plots):\n \"\"\" Should be called by run_animations only. \"\"\"\n\n # TODO Using the indices of the self.frames, plot in correct location.\n # Okay right now there is a problem where it's unknown whether the set of coordinates\n # is a line or a dot -- that info got lost up there\n\n for amb_index in range(len(self.frames[frame_index])):\n xs = self.frames[frame_index][amb_index][0]\n ys = self.frames[frame_index][amb_index][1]\n\n # if len(xs) > 1:\n # if xs[0] == xs[1]:\n # plots[amb_index][1].set_data([xs[0]], [ys[0]])\n # if xs[-2] == xs[-1]:\n # plots[amb_index][1].set_data([xs[-1]], [ys[-1]])\n\n plots[amb_index][0].set_data(xs, ys)\n\n print(plots[len(self.ambulance_locations)])\n\n return plots,\n\n fig = plt.figure(figsize=(14, 8))\n\n # TODO need [number of ambulances] x [number of states]\n\n plots = []\n for i in range(len(self.ambulance_locations)):\n new_color = self.ambulance_colors[i]\n\n line_plot, = plt.plot([], [],\n marker='+',\n linestyle='',\n markerfacecolor=new_color,\n markeredgecolor=new_color,\n label=\"Ambulance {} Path\".format(i + 1))\n\n # dot_plot, = plt.plot([], [],\n # marker='o',\n # linestyle='',\n # markerfacecolor=new_color,\n # markeredgecolor=new_color)\n\n # plots.append([line_plot, dot_plot])\n\n plots.append([line_plot])\n\n base_plot = plt.scatter([base.longitude for base in self.bases],\n [base.latitude for base in self.bases],\n marker=\"D\", color=\"black\", label=\"Bases\")\n hospital_plot = plt.scatter([hospital.longitude for hospital in self.hospitals],\n [hospital.latitude for hospital in self.hospitals],\n marker=\"P\", color=\"r\", label=\"Hospitals\")\n\n plots.append(base_plot)\n plots.append(hospital_plot)\n\n # TODO Make boundaries parameters\n\n img = plt.imread(\"./visuals/simple.png\")\n plt.imshow(img, extent=[-117.017637, -117.167672, 32.710484, 32.823033])\n plt.legend(loc=\"upper right\")\n print(\"draw the animation\")\n ani = animation.FuncAnimation(fig, _get_frame, len(self.frames),\n fargs=(plots,), interval=50)\n\n plt.show()\n\n # fps = 15\n # print('save the animation')\n # print(\"it may take up to {}\".format(len(self.frames)/fps))\n # ani.save('regional_vis6.mp4', fps=fps, dpi=150)", "def plot_lines(data, title, xlabel, ylabel, labels=None, filename=None):\n ### Check that the data is a list\n if not isinstance(data, list):\n msg = \"data must be a list, not {0}\".format(type(data).__name__)\n raise TypeError(msg)\n\n ### Create a new figure\n fig = pylab.figure()\n\n ### Plot the data\n if labels:\n mylabels = labels[:]\n for i in range(len(data)-len(labels)):\n mylabels.append(\"\")\n for d, l in zip(data, mylabels):\n _plot_dict_line(d, l)\n # Add legend\n pylab.legend(loc='best')\n gca = pylab.gca()\n legend = gca.get_legend()\n pylab.setp(legend.get_texts(), fontsize='medium')\n else:\n for d in data:\n _plot_dict_line(d)\n\n ### Set the lower y limit to 0 or the lowest number in the values\n mins = [min(l.values()) for l in data]\n ymin = min(0, min(mins))\n pylab.ylim(ymin=ymin)\n\n ### Label the plot\n pylab.title(title)\n pylab.xlabel(xlabel)\n pylab.ylabel(ylabel)\n\n ### Draw grid lines\n pylab.grid(True)\n\n ### Show the plot\n fig.show()\n\n ### Save to file\n if filename:\n 
pylab.savefig(filename)", "def plot(self, noTLS, path_plots, interactive):\n fig = plt.figure(figsize=(10,12))\n ax1 = fig.add_subplot(4, 1, 1)\n ax2 = fig.add_subplot(4, 1, 2)\n ax3 = fig.add_subplot(4, 2, 5)\n ax4 = fig.add_subplot(4, 2, 6)\n ax5 = fig.add_subplot(4, 2, 7)\n ax6 = fig.add_subplot(4, 2, 8)\n\n # First panel: data from each sector\n colors = self._get_colors(self.nlc)\n for i, lci in enumerate(self.alllc):\n p = lci.normalize().remove_outliers(sigma_lower=5.0, sigma_upper=5.0)\n p.bin(5).scatter(ax=ax1, label='Sector %d' % self.sectors[i], color=colors[i])\n self.trend.plot(ax=ax1, color='orange', lw=2, label='Trend')\n ax1.legend(fontsize='small', ncol=4)\n\n # Second panel: Detrended light curve\n self.lc.remove_outliers(sigma_lower=5.0, sigma_upper=5.0).bin(5).scatter(ax=ax2,\n color='black',\n label='Detrended')\n\n # Third panel: BLS\n self.BLS.bls.plot(ax=ax3, label='_no_legend_', color='black')\n mean_SR = np.mean(self.BLS.power)\n std_SR = np.std(self.BLS.power)\n best_power = self.BLS.power[np.where(self.BLS.period.value == self.BLS.period_max)[0]]\n SDE = (best_power - mean_SR)/std_SR\n ax3.axvline(self.BLS.period_max, alpha=0.4, lw=4)\n for n in range(2, 10):\n if n*self.BLS.period_max <= max(self.BLS.period.value):\n ax3.axvline(n*self.BLS.period_max, alpha=0.4, lw=1, linestyle=\"dashed\")\n ax3.axvline(self.BLS.period_max / n, alpha=0.4, lw=1, linestyle=\"dashed\")\n sx, ex = ax3.get_xlim()\n sy, ey = ax3.get_ylim()\n ax3.text(ex-(ex-sx)/3, ey-(ey-sy)/3,\n 'P$_{MAX}$ = %.3f d\\nT0 = %.2f\\nDepth = %.4f\\nDuration = %.2f d\\nSDE = %.3f' %\n (self.BLS.period_max, self.BLS.t0_max,\n self.BLS.depth_max, self.BLS.duration_max, SDE))\n\n\n # Fourth panel: lightcurve folded to the best period from the BLS\n self.folded.bin(1*self.nlc).scatter(ax=ax4, label='_no_legend_', color='black',\n marker='.', alpha=0.5)\n l = max(min(4*self.BLS.duration_max/self.BLS.period_max, 0.5), 0.02)\n nbins = int(50*0.5/l)\n r1, dt1 = binningx0dt(self.folded.phase, self.folded.flux, x0=-0.5, nbins=nbins)\n ax4.plot(r1[::,0], r1[::,1], marker='o', ls='None',\n color='orange', markersize=5, markeredgecolor='orangered', label='_no_legend_')\n\n lc_model = self.BLS.bls.get_transit_model(period=self.BLS.period_max,\n duration=self.BLS.duration_max,\n transit_time=self.BLS.t0_max)\n lc_model_folded = lc_model.fold(self.BLS.period_max, t0=self.BLS.t0_max)\n ax4.plot(lc_model_folded.phase, lc_model_folded.flux, color='green', lw=2)\n ax4.set_xlim(-l, l)\n h = max(lc_model.flux)\n l = min(lc_model.flux)\n ax4.set_ylim(l-4.*(h-l), h+5.*(h-l))\n del lc_model, lc_model_folded, r1, dt1\n\n\n if not noTLS:\n # Fifth panel: TLS periodogram\n ax5.axvline(self.tls.period, alpha=0.4, lw=3)\n ax5.set_xlim(np.min(self.tls.periods), np.max(self.tls.periods))\n for n in range(2, 10):\n ax5.axvline(n*self.tls.period, alpha=0.4, lw=1, linestyle=\"dashed\")\n ax5.axvline(self.tls.period / n, alpha=0.4, lw=1, linestyle=\"dashed\")\n ax5.set_ylabel(r'SDE')\n ax5.set_xlabel('Period (days)')\n ax5.plot(self.tls.periods, self.tls.power, color='black', lw=0.5)\n ax5.set_xlim(0, max(self.tls.periods))\n\n period_tls = self.tls.period\n T0_tls = self.tls.T0\n depth_tls = self.tls.depth\n duration_tls = self.tls.duration\n FAP_tls = self.tls.FAP\n\n sx, ex = ax5.get_xlim()\n sy, ey = ax5.get_ylim()\n ax5.text(ex-(ex-sx)/3, ey-(ey-sy)/3,\n 'P$_{MAX}$ = %.3f d\\nT0 = %.1f\\nDepth = %.4f\\nDuration = %.2f d\\nFAP = %.4f' %\n (period_tls, T0_tls, 1.-depth_tls, duration_tls, FAP_tls))\n\n # Sixth panel: folded light curve to 
the best period from the TLS\n ax6.plot(self.tls.folded_phase, self.tls.folded_y, color='black', marker='.',\n alpha=0.5, ls='None', markersize=0.7)\n l = max(min(4*duration_tls/period_tls, 0.5), 0.02)\n nbins = int(50*0.5/l)\n r1, dt1 = binningx0dt(self.tls.folded_phase, self.tls.folded_y,\n x0=0.0, nbins=nbins, useBinCenter=True)\n ax6.plot(r1[::,0], r1[::,1], marker='o', ls='None', color='orange',\n markersize=5, markeredgecolor='orangered', label='_no_legend_')\n ax6.plot(self.tls.model_folded_phase, self.tls.model_folded_model, color='green', lw=2)\n ax6.set_xlim(0.5-l, 0.5+l)\n h = max(self.tls.model_folded_model)\n l = min(self.tls.model_folded_model)\n ax6.set_ylim(l-4.*(h-l), h+5.*(h-l))\n ax6.set_xlabel('Phase')\n ax6.set_ylabel('Relative flux')\n del r1, dt1\n\n fig.subplots_adjust(top=0.98, bottom=0.05, wspace=0.25, left=0.1, right=0.97)\n fig.savefig(os.path.join(path_plots, 'TIC%d.pdf' % self.TIC))\n if interactive:\n plt.show()\n plt.close('all')\n del fig", "def _display_from_tsne(self, x, y):\n\n # Find the closest 9\n inds = np.argsort(np.sum( (self._Y_tsne-np.array([x, y]))**2, axis=1))\n print(inds[:10])\n\n # Plot the green circles on the tsne plot\n self._display_tsne()\n self._tsne_window.plot(self._Y_tsne[inds[:9],0], self._Y_tsne[inds[:9],1], 'yo')\n\n # Now run through the 9 sub axes and display the image data and cutout location.\n self._sub_window_filenames = []\n for ii, axis in enumerate(self._sub_windows):\n axis.clear()\n\n fits_filename, filename, sliceno, middle = self._process_result_filename_cutout_number[inds[ii]]\n print('display from tsne fits: {} filename: {}'.format(fits_filename, filename))\n\n # So, the filename actually contains the wrong path on it so we\n # need to take it off and use the proper path.\n pf = pickle.load(open(os.path.join(self._cutouts_directory, filename), 'rb'))\n ff = list(glob.iglob('{}/**/{}'.format(self._data_directory, pf['filename'].split('/')[-1])))[0]\n\n print(ff)\n self._display_window(axis, ff)\n self._sub_window_filenames.append(fits_filename)\n\n # Draw the line\n axis.plot([middle[0]-112, middle[0]-112], [middle[1]-112, middle[1]+112], 'y')\n axis.plot([middle[0]+112, middle[0]+112], [middle[1]-112, middle[1]+112], 'y')\n axis.plot([middle[0]-112, middle[0]+112], [middle[1]-112, middle[1]-112], 'y')\n axis.plot([middle[0]-112, middle[0]+112], [middle[1]+112, middle[1]+112], 'y')\n\n plt.figure(1).show()\n plt.figure(1).canvas.draw()", "def app_SN_last_24h_plots(self, dfs, phase_dirs, sizer_val, styler_val):\n\n print('Currently plotting con_last_24h_plots')\n\n for df, direc in zip(dfs, phase_dirs):\n df = self.data_processor.create_24h_subset(df)\n self.plotter.plot_SN_separate_temperature_series(\n df=df,\n phase_directory=direc,\n folder=self.large_test_data.folders['06_last_24_hours'],\n sizer=self.formatter.sizer[sizer_val]['1x1_full_width'],\n styler=self.formatter.styler[styler_val],\n xaxis_type='datetime',\n last_day=True\n )\n self.plotter.plot_SN_separate_moisture_series(\n df=df,\n phase_directory=direc,\n folder=self.large_test_data.folders['06_last_24_hours'],\n sizer=self.formatter.sizer[sizer_val]['1x1_full_width'],\n styler=self.formatter.styler[styler_val],\n xaxis_type='datetime',\n last_day=True\n )\n self.plotter.plot_SN_combined_temperature_series(\n df=df,\n phase_directory=direc,\n folder=self.large_test_data.folders['06_last_24_hours'],\n sizer=self.formatter.sizer[sizer_val]['3x1_full_width'],\n styler=self.formatter.styler[styler_val],\n xaxis_type='datetime',\n last_day=True,\n )\n 
self.plotter.plot_SN_combined_moisture_series(\n df=df,\n phase_directory=direc,\n folder=self.large_test_data.folders['06_last_24_hours'],\n sizer=self.formatter.sizer[sizer_val]['3x1_full_width'],\n styler=self.formatter.styler[styler_val],\n xaxis_type='datetime',\n last_day=True,\n )\n\n print('Done\\n')", "def plot_data(self, data, backup_frame):\n title = self.filename.split('-')\n final_titles = title[2].split('.')\n self.final_title_sub = final_titles[0].lower()\n\n # Accounts for the three types of graph required\n # date for archival purposes\n # web for the web server and\n # log for the logarithmic graphs\n graph_list = ['date', 'web', 'log']\n for mode in graph_list:\n for column in data.columns:\n data['Rest of the World'] = \\\n backup_frame['Global_Cases'] - data[column]\n x_axis = data.index.values\n\n fig, axes = plt.subplots()\n axes.plot(x_axis, data[column], marker='o',\n label=column)\n axes.plot(x_axis, data['Rest of the World'], marker='s',\n label='Rest of the World')\n fig.autofmt_xdate()\n\n every_nth = 4\n for number, label in enumerate(axes.xaxis.get_ticklabels()):\n if number % every_nth != 0:\n label.set_visible(False)\n\n axes.set(xlabel='Date', ylabel='Cases',\n title=f'Covid-19 {self.final_title_sub} '\n f'cases for {column} - data from '\n f'John Hopkins CSSE')\n axes.grid()\n axes.legend()\n\n # Setting the y-axis\n if mode == 'log':\n axes.set_yscale('log')\n else:\n data_max = data.max(axis=1)\n max_number = data_max[-1]\n rounded_max = self.round_up(max_number, -3)\n rounded_max += 2000\n axes.set_ylim([0, rounded_max])\n\n # -----------------------------------------------------\n # Adds Labels to annotate the last data point for each\n # plot\n y_axis1 = data[column][-1]\n y_axis2 = data['Rest of the World'][-1]\n\n plt.annotate(y_axis1, (x_axis[-1], y_axis1 + 500),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=12)\n plt.annotate(y_axis2, (x_axis[-1], y_axis2 + 500),\n bbox=dict(facecolor='red', alpha=0.5),\n fontsize=12)\n # -----------------------------------------------------\n\n # Required in order to stop the column from summing\n # the total of each run through the loop\n # otherwise this leads to Rest of World values in the\n # millions\n data = data.drop('Rest of the World', axis=1)\n\n if mode == 'log':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'log_' \\\n f'{self.final_title_sub}_for_' \\\n f'{column}.png'\n elif mode == 'date':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{x_axis[-1]}-2020-' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n elif mode == 'web':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n else:\n print('error')\n\n fig.savefig(dir_name, transparent=False, dpi=300,\n bbox_inches=\"tight\")\n\n if os.path.exists(dir_name):\n logging.debug('File saved at: %s', {dir_name})\n print(f'Files saved at:\\n'\n f'{dir_name}\\n')\n else:\n logging.debug('Failed to save')\n logging.debug(os.getcwd())\n plt.close()\n return data", "def plot_scatter_points_lines(self):\n self.plot(2)", "def draw_table(ax, dfs, legend, x, y):\n col_labels = dfs_all_values(dfs, x)\n column_legend = []\n cell_text = []\n # loop over all pandas.DataFrame objects\n for df in dfs:\n # to allow query y(x) easily\n df = df.set_index(x)\n df_row = df[y]\n # build a row with filled blanks '-'\n row = [\"{:.2f}\".format(df_row[column]) if column in df_row.index else '-' \\\n for column in col_labels]\n cell_text.append(row)\n\n ax.axis('tight')\n ax.axis('off')\n 
ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \\\n loc='top')", "def draw_fixed_lines(canvas):\n canvas.delete('all') # delete all existing lines from the canvas\n\n # Write your code below this line\n #################################\n # Draw the top frame line for the line chart.\n canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE,\n GRAPH_MARGIN_SIZE, width=LINE_WIDTH)\n # Draw the bottom frame line for the line chart.\n canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, CANVAS_WIDTH - GRAPH_MARGIN_SIZE,\n CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, width=LINE_WIDTH)\n # Draw vertical lines and texts for line chart by year.\n for yr in range(len(YEARS)):\n x = get_x_coordinate(width=CANVAS_WIDTH, year_index=yr)\n canvas.create_line(x, 0, x, CANVAS_HEIGHT, width=LINE_WIDTH)\n canvas.create_text(x + TEXT_DX, CANVAS_HEIGHT - GRAPH_MARGIN_SIZE, text=YEARS[yr], anchor=tkinter.NW)", "def axes_subplots():\n # gerenate data\n x = np.arange(0, 6 * np.pi+0.2, 0.2)\n y_1 = np.cos(x)\n y_2 = np.sin(2*x)\n y_3 = y_1 + y_2\n\n # display multiple\n fig, axs = plt.subplots(3, 1, sharex=True)\n fig.suptitle('Subplots w/ shared axes')\n axs[0].plot(x, y_1)\n axs[1].plot(x, y_2)\n axs[2].plot(x, y_3)\n axs[0].set_ylabel('$y$')\n axs[1].set_ylabel('$y$')\n axs[2].set_ylabel('$y$')\n\n plt.show()\n\n return None", "def draw(self,ax):\n # remove cell lines if thery are on the plot\n # (if new axes are created the cell lines will be not there)\n for line in self.cell_lines:\n try:\n ax.lines.remove(line)\n except ValueError:\n pass\n # create new list with cell boundaries\n ylims = ax.get_ylim()\n self.cell_lines = [ ax.plot(xx,ylims,'k:')[0] \n for xx in zip(self.xx_cells,self.xx_cells) ]", "def plot_lines(line_list, line_width=1.0):\n \n for line in line_list: \n start_lat, end_lat, start_lon, end_lon, color, style, input_projection, resolution = line\n \n assert style in list(line_style_dict.keys())\n assert resolution in ['high', 'low']\n\n start_lat = float(start_lat)\n start_lon = float(start_lon)\n end_lat = float(end_lat)\n end_lon = float(end_lon)\n\n lons = iris.analysis.cartography.wrap_lons(numpy.array([start_lon, end_lon]), 0, 360)\n # FIXME: start=0 might not work for all input/output projection combos\n\n if resolution == 'low':\n lats = numpy.array([start_lat, end_lat]) \n elif resolution == 'high':\n assert start_lat == end_lat or start_lon == end_lon, \\\n \"High res lines need constant lat or lon in reference coordinate system\"\n\n if start_lat == end_lat:\n lons = numpy.arange(lons[0], lons[-1] + 0.5, 0.5)\n lats = numpy.repeat(start_lat, len(lons))\n else:\n lats = numpy.arange(start_lat, end_lat + 0.5, 0.5)\n lons = numpy.repeat(lons[0], len(lats))\n\n plt.plot(lons, lats, \n linestyle=line_style_dict[style], \n color=color, linewidth=line_width,\n transform=input_projections[input_projection])", "def gdraw_line2(df=data):\n gr = df.groupby(['DOBorough', 'weekday'])\\\n .agg(trip_counts=('VendorID', 'count'))\\\n .reset_index(drop=False)\n return px.line(gr, x='weekday', y='trip_counts', color='DOBorough')\\\n .update_layout(\n template='plotly_dark',\n plot_bgcolor='rgba(0, 0, 0, 0)',\n paper_bgcolor='rgba(0, 0, 0, 0)',\n )", "def _calculate_y_lines(self, dists):\n tot_dist = sum(dists)\n if tot_dist > 0:\n pixel_dist = [float(d * (self._g_height - 20)) / tot_dist for d in dists]\n pixel_grid = [0] + [int(pd + sum(pixel_dist[0:i])) for i, pd in\n enumerate(pixel_dist)]\n else:\n pixel_grid = []\n\n return 
pixel_grid", "def _plot_drawdown(self, ts, bm_ts, ax=None):\n def two_dec_format(x, pos):\n return \"{:.0%}\".format(x)\n\n equity = ts\n\n if ax is None:\n ax = plt.gca()\n\n y_axis_formatter = FuncFormatter(two_dec_format)\n ax.yaxis.set_major_formatter(FuncFormatter(y_axis_formatter))\n ax.xaxis.set_tick_params(reset=True)\n ax.yaxis.grid(linestyle=':')\n ax.xaxis.set_major_locator(mdates.YearLocator(1))\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))\n ax.xaxis.grid(linestyle=':')\n\n if bm_ts is not None:\n benchmark = bm_ts\n benchmark.plot(kind='area', lw=2, color='grey', label=\"Benchmark\", alpha=0.60, ax=ax)\n\n equity.plot(kind='area', lw=2, color='xkcd:azure', alpha=0.6, x_compat=False, label='Strategy', ax=ax)\n ax.set_ylabel('Drawdown')\n ax.legend(loc='best')\n ax.set_xlabel('')\n plt.setp(ax.get_xticklabels(), visible=True, rotation=0, ha='center')\n\n return ax", "def plot_multiple_timeline(logs, remove_redundant_entries, select_string):\n\tfig, ax = plt.subplots(figsize=(11,6))\n\tfig.autofmt_xdate()\n\tline2D_array = []\n\tplot_data_dict = {}\n\tfor l in logs:\n\t\tselected_sources = _transform_select_string(select_string,l)\n\t\tpld, lines, dates, _ = l.give_plot_data(remove_redundant_entries=remove_redundant_entries, sources=selected_sources)\n\t\ttmp, = ax.plot(dates, lines, label=l.name, picker=4, marker='.', linestyle='-', linewidth=0.5, ms=3.5)\n\t\tline2D_array.append(tmp)\n\t\tplot_data_dict[tmp.get_c()] = pld\n\tmyFmt = DateFormatter(\"%Y %d.%b %H:%M:%S\")\n\tax.xaxis.set_major_formatter(myFmt)\n\tnames = ' and '.join([x.name for x in logs])\n\tif remove_redundant_entries:\n\t\tplt.title('Analysis of the files ' + names +'\\n' + 'where all entries having the same timestamp are removed')\n\t\tplt.subplots_adjust(left=0.1, bottom=0.18, right=0.9, top=0.90)\n\telse:\n\t\tplt.title('Analysis of the files ' + names)\n\t\tplt.subplots_adjust(left=0.1, bottom=0.18, right=0.9, top=0.95)\n\tplt.legend()\n\tannot = ax.annotate(\"\", xy=(0,0), xytext=(0.01,0.01) ,textcoords='figure fraction', bbox=dict(boxstyle=\"round\", fc=\"cyan\"), arrowprops=dict(arrowstyle=\"->\"))\n\tannot.set_visible(False)\n\tax.set_xlabel('timestamps in UTC')\n\tax.set_ylabel('sequential id')\n\n\tdef update_annot(l,ind):\n\t\tplot_data = plot_data_dict[l.get_c()]\n\t\tx,y = l.get_data()\n\t\tannot.xy = (x[ind[\"ind\"][0]], y[ind[\"ind\"][0]])\n\t\tif remove_redundant_entries == 1:\n\t\t\ttext = plot_data[y[ind[\"ind\"][0]]-1]\n\t\telse:\n\t\t\ttemp = [x for x in plot_data if x.id == y[ind[\"ind\"][0]]]\n\t\t\ttext = temp[0]\n\t\tannot.set_text(text)\n\t\tannot.get_bbox_patch().set_alpha(0.4)\n\n\tdef hover(event):\n\t\tvis = annot.get_visible()\n\t\tif event.inaxes == ax:\n\t\t\tfor l in line2D_array:\n\t\t\t\tcont, ind = l.contains(event)\n\t\t\t\tif cont:\n\t\t\t\t\tupdate_annot(l,ind)\n\t\t\t\t\tannot.set_visible(True)\n\t\t\t\t\tfig.canvas.draw_idle()\n\t\t\t\telse:\n\t\t\t\t\tif vis:\n\t\t\t\t\t\tannot.set_visible(False)\n\t\t\t\t\t\tfig.canvas.draw_idle()\n\n\tfig.canvas.mpl_connect(\"motion_notify_event\", hover)\n\tfig.canvas.mpl_connect('key_press_event', _quit_figure)", "def plot_data(DataFrame, title, ps, pe, columns: list):\n\n ax = plt.gca()\n idx = 0\n colors = ['blue', 'red', 'green', 'black']\n plot_titles = title.split('/')\n plot_title = plot_titles[1]\n\n for c in columns:\n DataFrame[ps:pe].plot(kind='line',y=c, color=colors[idx], ax=ax, fontsize=4, title=plot_title)\n idx+=1\n\n\n plt.savefig('{}.jpg'.format(title))\n plt.show()", "def plot_results(self, a):\n 
import matplotlib.pyplot as plt\n fig, axes = plt.subplots(nrows=len(a.data_vars), sharex='all', sharey='all')\n for ax, var in zip(axes, a.data_vars):\n data = a[var]\n plt.sca(ax)\n data.plot(x='time', cmap=plt.cm.viridis_r, yincrease=False, robust=True)\n plt.show()", "def display(self):\r\n \r\n plt.rcParams['font.size'] = 14\r\n plt.rcParams['axes.linewidth'] = 1.2 # 1.2 for single plot, 0.5 for all 6\r\n plt.rcParams['lines.linewidth'] = 20.0 # Aah, this doesn't work because line width is changed later on\r\n\r\n cwd = os.getcwd() # Gets current working directory.\r\n cwd = cwd.replace('\\\\', '/')\r\n path = cwd + directory # This is the folder all the results are stored in.\r\n \r\n if type(array_element) == str:\r\n dataframes = [file + array_element] # This is to pass a single csv file\r\n else:\r\n dataframes = [file + i for i in array_element] # This is a list so you can pass multiple csv files to be overlayed on the same plot.\r\n\r\n colours = ['black', 'darkred', 'darkmagenta', 'darkturquoise', 'saddlebrown'] # Array of colours for the lines.\r\n\r\n dfE = pd.read_csv(cwd + \"/experimental_data.csv\") # Reads in the experimental data as a pandas dataframe.\r\n\r\n # Rescale the x-axis of the experimental data.\r\n ratio_of_capacities = 272.4 / 338.313338 # experimental maximum capacity / theoretical maximum capacity\r\n dfE[\"x_theo\"] = ratio_of_capacities * dfE[\"x\"]\r\n # 'x' is the experimental x and 'x_theo' is the theoretical x.\r\n\r\n # Second derivative of enthalpy for experimental data. One w/ respect to the experimental x and one w/ respect to theoretical x.\r\n secder_enthalpy_experimental_x = np.gradient(np.array(dfE['Enthalpy dH/dx']), np.array(dfE['x']))\r\n secder_enthalpy_experimental_x_theo = np.gradient(np.array(dfE['Enthalpy dH/dx']), np.array(dfE['x_theo']))\r\n dfE['secder enthalpy x'] = secder_enthalpy_experimental_x\r\n dfE['secder enthalpy x theo'] = secder_enthalpy_experimental_x_theo\r\n\r\n # vertical shift on p.m. entropy for vibrational effect\r\n vibrational_shift = 0.0108 # eV K this includes being multiplied by the ratio of capacities.\r\n dfE[\"Entropy dS/dx\"] = (dfE[\"Entropy dS/dx\"]) - vibrational_shift\r\n\r\n # Integrates the p.m. entropy\r\n entropy_list_experimental = integrate.cumtrapz(dfE['Entropy dS/dx'], dfE['x'],\r\n initial=0) # Contains the entropy values\r\n dfE['Entropy'] = entropy_list_experimental\r\n\r\n dfE['x_new'] = ((dfE['x_theo'] - dfE['x_theo'].iloc[0]) * dfE['x_theo'][73]) / (dfE['x_theo'][73] - dfE['x_theo'].iloc[0]) # Rescales the line so that the experimental data starts at 0.\r\n dfE['x'] = ((dfE['x'] - dfE['x'].iloc[0]) * dfE['x'][73]) / (dfE['x'][73] - dfE['x'].iloc[0]) # Same as above but for experimental x axis.\r\n\r\n # Calculates the analytical solution\r\n points = 1000\r\n x_pos = np.linspace(0, 1, points) # x for p.m. entropy\r\n y_pos = np.linspace(0, 1, points) # y for p.m. 
etropy\r\n s_x = np.linspace(0, 1, points) # x for entropy\r\n s_y = np.linspace(0, 1, points) # y for entropy\r\n l = 0.329217689 # This must be the same as what was used in the main script\r\n R = -0.0000862 # eV/K.Site\r\n T = 288 # K\r\n for index, x in enumerate(x_pos):\r\n if x < l:\r\n s_y[index] = (R * (x * np.log(x / l) - (x - l) * np.log((l - x) / l))) * T\r\n y_pos[index] = T * R * (np.log(x / l) - np.log((l - x) / l))\r\n else:\r\n s_y[index] = (R * l * (\r\n (x / l - 1) * np.log(x / l - 1) + (1 - x) / l * np.log((1 - x) / l) - (1 - l) / l * np.log(\r\n (1 - l) / l))) * T\r\n y_pos[index] = T * R * (np.log(x / l - 1) - np.log(1 / l - x / l))\r\n\r\n # Calculates the single solid state entropy\r\n x_ent = np.linspace(0, 1, points)\r\n y_ent = np.linspace(0, 1, points)\r\n for index, x in enumerate(x_ent):\r\n y_ent[index] = T * R * (x * np.log(x) + (1-x) * np.log(1-x))\r\n \r\n \"\"\"\r\n #\r\n #\r\n # Create plot and formats\r\n #\r\n #\r\n \"\"\"\r\n \r\n fig, axes = plt.subplots(nrows=num_row, ncols=num_col, constrained_layout=True, squeeze=False)\r\n # squeeze=False is needed to prevent errors when plotting a single subplot\r\n plt.rc('legend', fontsize=13, handlelength=1)\r\n plt.rc('tick')\r\n lw = 1.5 # Line width\r\n \r\n plt.tick_params(bottom=True, top=True, left=True, right=True)\r\n plt.tick_params(labelbottom=True, labeltop=False, labelleft=True, labelright=False)\r\n plt.tick_params(direction='in', width=1.2, length=4.5, pad=3) # For single plot\r\n # plt.tick_params(direction='in', width=1, length=4.5, pad=3) # For multiple plots\r\n\r\n marker_list = ['v', '^', 'p', 'o']\r\n mark_size = 3 #0.7 for 6 plots\r\n \r\n colours = ['#176ba0', '#af4bce', 'orangered', '#48a11b', '#3caea3'] #'#af4bce'\r\n common_legend = ['400 Averaging Steps', '800 Averaging Steps', '2000 Averaging Steps']\r\n \r\n if num_col==2 and num_row==3: # This will work when using the original axes dimensions (3 rows, 2 columns)\r\n placement = dict([\r\n ('voltage', axes[0, 0]),\r\n ('dS/dx', axes[0, 1]),\r\n ('dQ/dV', axes[1, 0]),\r\n ('dH/dx', axes[1, 1]),\r\n ('S', axes[2, 0]),\r\n ('d/dx(dH/dx)', axes[2, 1])\r\n ])\r\n else: # If axes dimensions are different, I'm probably trying to plot one graph\r\n \"\"\"\r\n If plotting more than one graph, the position on the plot in the subplot can be adjusted\r\n by appropriately altering the axes[] parameter. 
For the graphs that are not being plotted, \r\n leave their position as axes[0, 0].\r\n \"\"\"\r\n placement = dict([\r\n ('voltage', axes[0, 0]),\r\n ('dS/dx', axes[0, 0]),\r\n ('dQ/dV', axes[0, 0]),\r\n ('dH/dx', axes[0, 0]),\r\n ('S', axes[0, 0]),\r\n ('d/dx(dH/dx)', axes[0, 0])\r\n ])\r\n \r\n # Plots all of the experimental data\r\n if experimental_plot == True:\r\n if pick_plot['voltage'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['voltage'], x='x_new', y='OCV')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['voltage'], x='x', y='OCV')\r\n \r\n if pick_plot['dS/dx'] == True:\r\n ax2 = dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dS/dx'], x='x_new', y='Entropy dS/dx')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['dS/dx'], x='x', y='Entropy dS/dx')\r\n \r\n if pick_plot['dQ/dV'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dQ/dV'], x='OCV', y='dQdV') \r\n \r\n if pick_plot['dH/dx'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dH/dx'], x='x_new', y='Enthalpy dH/dx')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['dH/dx'], x='x', y='Enthalpy dH/dx')\r\n \r\n if pick_plot['S'] == True:\r\n ax5 = dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['S'], x='x_new', y='Entropy')\r\n \r\n if pick_plot['d/dx(dH/dx)'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['d/dx(dH/dx)'], x='x_new', y='secder enthalpy x theo')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['d/dx(dH/dx)'], x='x', y='secder enthalpy x')\r\n\r\n # Iterate through all the data to be plotted\r\n if simulation_plot == True:\r\n for count, df in enumerate(dataframes):\r\n df1 = pd.read_csv(path + df) # reads file into a dataframe.\r\n \r\n df1 = df1.replace(0, np.nan).dropna(axis=0, how='all') # For the rows with all '0' entries they are replaced with 'nan' and then these rows are dropped.\r\n df1 = df1.replace(np.nan, 0) # As some legitimate 0 entries such as 0 volts we flip back the remaining from 'nan' to 0.\r\n \r\n # Integrates the p.m. entropy\r\n entropy_list = integrate.cumtrapz(df1['Partial molar entropy'], df1['Total mole fraction'],\r\n initial=0) # Contains the entropy values\r\n df1['Entropy'] = entropy_list\r\n \r\n # Rescale voltage profile and p.m. enthalpy by the chain rule.\r\n df1[\"adjusted voltage\"] = df1[\"Chemical potential\"] * ratio_of_capacities\r\n df1[\"adjusted enthalpy\"] = df1[\"Partial molar enthalpy\"] * ratio_of_capacities\r\n df1[\"adjusted entropy\"] = df1[\"Partial molar entropy\"] * ratio_of_capacities\r\n df1[\"adjusted dq/de\"] = df1[\"dq/de\"] * (1/ratio_of_capacities)**2\r\n \r\n # Differentiate the p.m. 
enthalpy to get the second derivative.\r\n pm_enthalpy = np.array(df1['adjusted enthalpy'])\r\n mole_fraction = np.array(df1['Total mole fraction'])\r\n secder_enthalpy = np.gradient(pm_enthalpy, mole_fraction)\r\n df1['secder enthalpy'] = secder_enthalpy\r\n \r\n if pick_plot['voltage'] == True:\r\n ax1 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['voltage'], x='Total mole fraction', y='adjusted voltage')\r\n ax1.set_xlim([0, 1])\r\n ax1.set_xlabel('Na content $[x]$')\r\n ax1.set_ylabel('Voltage $[V]$')\r\n ax1.legend(common_legend) \r\n # ax1.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['dS/dx'] == True:\r\n ax2 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dS/dx'], x='Total mole fraction', y='adjusted entropy')\r\n # ax2.plot(x_pos, y_pos, linewidth=lw, color='red') # Plots the ideal p.m. entropy\r\n ax2.set_xlim([0, 1])\r\n ax2.set_xlabel('Na content $[x]$')\r\n ax2.set_ylabel('$\\\\frac{dS}{dx}$ $[eV K/site]$')\r\n ax2.legend(common_legend) \r\n # ax2.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data', 'Analytical solution'])\r\n \r\n if pick_plot['dQ/dV'] == True:\r\n ax3 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dQ/dV'], x='Chemical potential', y='adjusted dq/de') \r\n ax3.set_xlim([-0.1, 1])\r\n ax3.set_xlabel('Voltage $[V]$')\r\n ax3.set_ylabel('$\\\\frac{dQ}{dV}$ [$\\mathregular{eV^{-1}}$]')\r\n ax3.legend(common_legend)\r\n # ax3.legend(['Experimental data', 'Monte Carlo Data'])\r\n \r\n if pick_plot['dH/dx'] == True:\r\n ax4 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dH/dx'], x='Total mole fraction', y='adjusted enthalpy')\r\n ax4.set_xlim([0, 1])\r\n ax4.set_xlabel('Na content $[x]$')\r\n ax4.set_ylabel('$\\\\frac{dH}{dx}$ $[eV/site]$')\r\n ax4.legend(common_legend) \r\n # ax4.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['d/dx(dH/dx)'] == True:\r\n ax5 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['d/dx(dH/dx)'], x='Total mole fraction', y='secder enthalpy')\r\n ax5.set_xlim([0, 1])\r\n ax5.set_ylim([0, 6])\r\n ax5.set_xlabel('Na content $[x]$')\r\n ax5.set_ylabel('$\\\\frac{d^2H}{dx^2}$ $[eV/site]$')\r\n ax5.legend(common_legend)\r\n \r\n # ax5.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['S'] == True:\r\n ax6 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['S'], x='Total mole fraction', y='Entropy')\r\n \r\n # ax6.plot(s_x, s_y, linewidth=lw, color='red') # Plots the entropy for l=0.32...\r\n # ax6.plot(x_ent, y_ent, linewidth=lw, color='grey') # Plots the entropy for solid state solution.\r\n ax6.set_xlim([0, 1])\r\n ax6.set_xlabel('Na content $[x]$')\r\n ax6.set_ylabel('S $[eV K/site]$')\r\n ax6.legend(common_legend)\r\n # ax6.legend(['Experimental data', 'Monte Carlo data', 'Analytical solution', 'Solid 
state solution'], loc='upper right', bbox_to_anchor=(0.75, 0.5))\r\n \r\n \r\n\r\n # parameter_file = open(path + \"/Input_arguments_\" + uid + \".txt\", \"w\")\r\n # parameter_file.write(str(self.args))\r\n # parameter_file.close()\r\n\r\n # manager = plt.get_current_fig_manager()\r\n # # manager.resize(*manager.window.maxsize())\r\n # # fig_path = cwd + \"/Na_plot_results.png\"\r\n # # plt.savefig(path + \"/Na_monte_carlo_plot_\" + uid + \".png\")\r\n # plt.show()\r\n \r\n plt.savefig(\"Varying sps Overlaid Plots - dQ_dV\", dpi = 300)\r\n\r\n plt.show()", "def plotScopeHitGroups(day,scope,shot,df):\n\n plotScope(day,scope,shot)\n #x = df.ix[day].ix[shot]\n x = df.ix[shot]\n for p in range(411,415):\n if p==411:\n ax = plt.subplot(p)\n else:\n plt.subplot(p,sharex=ax)\n for id in x.index:\n row = x.ix[id]\n xx = row[[('tMax',det) for det in ['H1','H2','LaBr1','LaBr2','UB1','UB2','UB3','UB4']]]\n yy = row[[('ampMax',det) for det in ['H1','H2','LaBr1','LaBr2','UB1','UB2','UB3','UB4']]]\n plt.scatter(xx,yy)\n x1 = np.nanmin(xx); x2 = np.nanmax(xx)\n y1 = np.nanmin(yy); y2 = np.nanmax(yy)\n plt.plot([x1,x2,x2,x1,x1],[y1,y1,y2,y2,y1])", "def seas_line_plot(df, fwd=None, **kwargs):\n\n fig = go.Figure()\n traces = cptr.seas_plot_traces(df, fwd, **kwargs)\n if \"shaded_range\" in traces and traces[\"shaded_range\"]:\n for trace in traces[\"shaded_range\"]:\n fig.add_trace(trace)\n\n if \"average_line\" in traces:\n fig.add_trace(traces[\"average_line\"])\n\n if \"hist\" in traces:\n for trace in traces[\"hist\"]:\n fig.add_trace(trace)\n\n if \"fwd\" in traces:\n for trace in traces[\"fwd\"]:\n fig.add_trace(trace)\n\n fig.layout.xaxis.tickvals = pd.date_range(\n start=str(dates.curyear), periods=12, freq=\"MS\"\n )\n\n title = cpu.gen_title(df, **kwargs)\n legend = go.layout.Legend(font=dict(size=10), traceorder=\"reversed\")\n yaxis_title = kwargs.get(\"yaxis_title\", None)\n hovermode = kwargs.get(\"hovermode\", \"x\")\n fig.update_layout(\n title=title,\n title_x=0.01,\n xaxis_tickformat=\"%b\",\n yaxis_title=yaxis_title,\n legend=legend,\n hovermode=hovermode,\n margin=preset_margins,\n )\n\n return fig", "def generate_line_plot():\n df = pd.read_csv(\"/Users/maxwell/Documents/workspace/CoronaScan/results.csv\",\n names=[i for i in subreddits])\n\n dates = df.loc['2020-04-28']", "def plot_test_objective_multi(df, exp_config, output_dir, show):\n output_file_name = f\"{inspect.stack()[0][3]}.{FILE_EXTENSION}\"\n output_path = os.path.join(output_dir, output_file_name)\n\n plt.figure()\n\n for exp_name, exp_df in df.items():\n\n if \"rep\" in exp_config[\"data\"][exp_name]:\n\n exp_dfs = exp_df\n\n T = np.linspace(0, exp_config[\"t_max\"], 50000)\n\n y_list = []\n for i, df_i in enumerate(exp_dfs):\n\n df_i = process_for_test_objective(\n df_i.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = df_i.loc[df_i[\"max_idx\"]][\"timestamp_end\"].values\n y = df_i.loc[df_i[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n f = interp1d(x, y, kind=\"previous\", fill_value=\"extrapolate\")\n y = exp_config.get(\"best_objective\", 1) - f(T)\n y_list.append(y)\n\n y_list = np.asarray(y_list)\n y_mean = y_list.mean(axis=0)\n y_std = y_list.std(axis=0)\n y_se = y_std / np.sqrt(y_list.shape[0])\n\n plt.plot(\n T,\n y_mean,\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n plt.fill_between(\n T,\n y_mean - 1.96 * y_se,\n y_mean + 
1.96 * y_se,\n facecolor=exp_config[\"data\"][exp_name][\"color\"],\n alpha=0.3,\n )\n\n else:\n\n exp_df = process_for_test_objective(\n exp_df.sort_values(\"timestamp_end\"),\n mode=MODE,\n max_budget=exp_config[\"max_budget\"],\n )\n x = exp_df.loc[exp_df[\"max_idx\"]][\"timestamp_end\"].values\n y = exp_df.loc[exp_df[\"max_idx\"]][exp_config[\"test_objective\"]].values\n\n idx = np.unique(x, return_index=True, axis=0)[1]\n\n x = x[idx]\n y = y[idx]\n\n x = np.clip(np.concatenate([x, [exp_config[\"t_max\"]]]), 0, exp_config[\"t_max\"])\n y = np.clip(exp_config.get(\"best_objective\", 1) - np.concatenate([y, [y[-1]]]), 0, 1)\n \n area = aulc(x, y)\n exp_config[\"data\"][exp_name][\"AULC\"] = area\n \n plt.step(\n x[:],\n y[:],\n where=\"post\",\n label=exp_config[\"data\"][exp_name][\"label\"],\n color=exp_config[\"data\"][exp_name][\"color\"],\n marker=exp_config[\"data\"][exp_name].get(\"marker\", None),\n markevery=len(x) // 5,\n linestyle=exp_config[\"data\"][exp_name].get(\"linestyle\", \"-\"),\n )\n\n ax = plt.gca()\n ticker_freq = exp_config[\"t_max\"] / 5\n ax.xaxis.set_major_locator(ticker.MultipleLocator(ticker_freq))\n ax.xaxis.set_major_formatter(minute_major_formatter)\n\n if exp_config.get(\"title\") and PRINT_TITLE:\n plt.title(exp_config.get(\"title\"))\n\n # if MODE == \"min\":\n # plt.legend(loc=\"upper right\")\n # else:\n # plt.legend(loc=\"lower right\")\n plt.legend(loc=exp_config.get(\"legend\", \"best\"))\n\n plt.ylabel(\"Test Regret\")\n plt.xlabel(\"Search time (min.)\")\n\n if exp_config.get(\"ylim\"):\n plt.ylim(*exp_config.get(\"ylim\"))\n\n if exp_config.get(\"xlim\"):\n plt.xlim(*exp_config.get(\"xlim\"))\n else:\n plt.xlim(0, exp_config[\"t_max\"])\n\n if exp_config.get(\"yscale\"):\n plt.yscale(exp_config.get(\"yscale\"))\n\n plt.grid(which=\"minor\", color=\"gray\", linestyle=\":\")\n plt.grid(which=\"major\", linestyle=\"-\")\n plt.tight_layout()\n plt.savefig(output_path, dpi=360)\n if show:\n plt.show()\n plt.close()", "def plot_multifig(data,NO_SENSORS,dataSelection):\n \n # Axis options\n yAxisLimits = [[0,1024],[-3,3]]\n \n # Plots a seperate graph for each sensor\n for i in range(0,NO_SENSORS):\n plt.figure(i + 1)\n plt.title('Sensor ' + str(i + 1))\n plt.plot(data[:,(3 + (4 * i))],data[:,(0 + (4 * i))],label='X Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(1 + (4 * i))],label='Y Axis')\n plt.plot(data[:,(3 + (4 * i))],data[:,(2 + (4 * i))],label='Z Axis')\n plt.ylim(yAxisLimits[dataSelection][0],yAxisLimits[dataSelection][1])\n plt.xlabel('Time/s')\n plt.ylabel('Acceleration/g')\n plt.legend()\n plt.show()", "def raster(event_times_list, **kwargs):\n ax = plt.gca()\n for ith, trial in enumerate(event_times_list):\n plt.vlines(trial, ith + .5, ith + 1.5, **kwargs)\n plt.ylim(.5, len(event_times_list) + .5)\n return ax", "def plotTotEfixedY(monthlyData):\n \n #assign x-angles (must be equally distributed between 0 and 90 deg)\n Xangles = np.array(range(len(monthlyData['angles']['x_angles'])))*90./(len(monthlyData['angles']['x_angles'])-1)\n \n fig = plt.figure()\n ax1=plt.subplot(2,1,1)\n plt.plot(Xangles, np.sum(monthlyData['H'], axis=1)/np.mean( np.sum(monthlyData['H'], axis=1)), label='H', color='r')\n plt.plot(Xangles, np.sum(monthlyData['C'], axis=1)/np.mean( np.sum(monthlyData['C'], axis=1)), label='C', color='b')\n plt.plot(Xangles, np.sum(monthlyData['L'], axis=1)/np.mean( np.sum(monthlyData['L'], axis=1)), label='L', color='g')\n plt.plot(Xangles, np.sum(monthlyData['PV'], axis=1)/np.mean( np.sum(monthlyData['PV'], axis=1)), label='PV', 
color='c')\n plt.plot(Xangles, np.sum(monthlyData['E_HCL'], axis=1)/np.mean( np.sum(monthlyData['E_HCL'], axis=1)), label='E_HCL', color='m')\n plt.plot(Xangles, np.sum(monthlyData['E_tot'], axis=1)/np.mean( np.sum(monthlyData['E_tot'], axis=1)), label='E_tot', color='k')\n plt.legend()\n \n plt.subplot(2,1,2, sharex=ax1)\n plt.plot(Xangles, np.sum(monthlyData['H'], axis=1), label='H', color='r')\n plt.plot(Xangles, np.sum(monthlyData['C'], axis=1), label='C', color='b')\n plt.plot(Xangles, np.sum(monthlyData['L'], axis=1), label='L', color='g')\n plt.plot(Xangles, np.sum(monthlyData['PV'], axis=1), label='PV', color='c')\n plt.plot(Xangles, np.sum(monthlyData['E_HCL'], axis=1), label='E_HCL', color='m')\n plt.plot(Xangles, np.sum(monthlyData['E_tot'], axis=1), label='E_tot', color='k')\n plt.legend()\n \n return fig", "def draw_spike_times(spike_times):\n for line in spike_times:\n plt.axvline(x=line, color='y')", "def multiplot(xvals, yvals=None, datarange=None, cmap='jet', labels=None, marker=None):\n\n if yvals is None:\n yvals = xvals\n xvals = []\n yvals = np.asarray(yvals)\n xvals = np.asarray(xvals)\n\n if datarange is None:\n datarange = range(len(yvals))\n datarange = np.asarray(datarange,dtype=float)\n\n cm = plt.get_cmap(cmap)\n colrange = (datarange - datarange.min()) / (datarange.max() - datarange.min())\n \n if marker is None:\n marker = ''\n linearg = '-' + marker\n\n plt.figure(figsize=FIGURE_SIZE, dpi=FIGURE_DPI)\n for n in range(len(datarange)):\n col = cm(colrange[n])\n if len(xvals) == 0:\n plt.plot(yvals[n], linearg, lw=2, color=col)\n elif len(xvals.shape) == 1:\n plt.plot(xvals, yvals[n], linearg, lw=2, color=col)\n else:\n plt.plot(xvals[n], yvals[n], linearg, lw=2, color=col)\n\n plt.setp(plt.gca().spines.values(), linewidth=2)\n plt.xticks(fontsize=25, fontname='Times New Roman')\n plt.yticks(fontsize=25, fontname='Times New Roman')\n plt.ticklabel_format(useOffset=False)\n plt.ticklabel_format(style='sci', scilimits=(-3, 3))\n\n if labels is None:\n # Add Colorbar\n sm = plt.cm.ScalarMappable(cmap=cm)\n sm.set_array(datarange)\n cbar = plt.colorbar(sm)\n #cbar.set_label('variation [unit]', fontsize=24, fontweight='bold', fontname='Times New Roman')\n else:\n # Add legend\n plt.legend(labels, loc=0, frameon=False, prop={'size':20,'family':'serif'})", "def app_SN_separate_temperature_series(self, dfs, phase_dirs, sizer_val, styler_val):\n\n print('Currently plotting plot_SN_separate_temperature_series')\n\n for df, direc in zip(dfs, phase_dirs):\n self.plotter.plot_SN_separate_temperature_series(\n df=df,\n phase_directory=direc,\n folder=self.large_test_data.folders['02_time_series_temperature'],\n sizer=self.formatter.sizer[sizer_val]['1x1_full_width'],\n styler=self.formatter.styler[styler_val]\n )\n self.plotter.plot_SN_separate_temperature_series(\n df=df,\n phase_directory=direc,\n folder=self.large_test_data.folders['02_time_series_temperature'],\n sizer=self.formatter.sizer[sizer_val]['1x1_full_width'],\n styler=self.formatter.styler[styler_val],\n xaxis_type='datetime'\n )\n\n print('Done\\n')" ]
[ "0.75955755", "0.6639895", "0.63147664", "0.6244438", "0.6018296", "0.5990527", "0.59632486", "0.5877029", "0.58101994", "0.58101994", "0.5800447", "0.5794655", "0.5775401", "0.57584023", "0.5737174", "0.5724625", "0.5724219", "0.57178116", "0.56640327", "0.56584495", "0.56467277", "0.5619865", "0.56068057", "0.5601182", "0.55957174", "0.5587734", "0.5585871", "0.5565191", "0.552624", "0.550852", "0.55071175", "0.54846853", "0.5472768", "0.546636", "0.5462759", "0.54524934", "0.544084", "0.543578", "0.5435186", "0.5435186", "0.5435186", "0.5435186", "0.5435186", "0.5434945", "0.5428864", "0.5419863", "0.5419531", "0.540869", "0.5402494", "0.53908014", "0.53865135", "0.53856987", "0.5369018", "0.53662825", "0.5362408", "0.5356909", "0.5356564", "0.53508383", "0.5343296", "0.534279", "0.53406733", "0.5337156", "0.5331613", "0.53291893", "0.531942", "0.5306168", "0.53052604", "0.52991635", "0.5296953", "0.5293207", "0.52857625", "0.5281883", "0.5279202", "0.52743447", "0.5268149", "0.5267992", "0.5260553", "0.5259664", "0.5255319", "0.5253458", "0.5249461", "0.5240123", "0.5229463", "0.5228179", "0.5203297", "0.51972485", "0.5197023", "0.5197006", "0.5196118", "0.5195955", "0.51927954", "0.51894605", "0.5180077", "0.51741844", "0.5169221", "0.5157297", "0.5151309", "0.5149813", "0.5148514", "0.5142688" ]
0.7199761
1
Draw a table of all data used to chart y(x)
def draw_table(ax, dfs, legend, x, y): col_labels = dfs_all_values(dfs, x) column_legend = [] cell_text = [] # loop over all pandas.DataFrame objects for df in dfs: # to allow query y(x) easily df = df.set_index(x) df_row = df[y] # build a row with filled blanks '-' row = ["{:.2f}".format(df_row[column]) if column in df_row.index else '-' \ for column in col_labels] cell_text.append(row) ax.axis('tight') ax.axis('off') ax.table(cellText=cell_text, rowLabels=legend, colLabels=col_labels, \ loc='top')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table(self):\n\n param=self.x_param\n\n device=self.device\n\n base_params=device.get_params()\n\n data_tot=DataFrame()\n\n for i in range(len(param)):\n\n print_index=1\n\n for name in param.names:\n\n device._set_params(param(i))\n\n device.draw()\n\n df=device.export_all()\n\n if self.labels_bottom is not None:\n\n index=self.labels_bottom[i]\n\n else:\n\n index=str(i)\n\n print(\"Generating table, item {} of {}\\r\".format(print_index,len(param)),end=\"\")\n\n data_tot=data_tot.append(Series(df,name=index))\n\n device._set_params(base_params)\n\n return data_tot", "def plot_data(self):", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def table(\n data=None,\n rows=None,\n columns=None,\n hrows=None,\n brows=None,\n lcols=None,\n rcols=None,\n label=None,\n width=None,\n height=None,\n ):\n canvas = Canvas(width=width, height=height)\n axes = canvas.table(\n data=data,\n rows=rows,\n columns=columns,\n hrows=hrows,\n brows=brows,\n lcols=lcols,\n rcols=rcols,\n label=label)\n return canvas, axes", "def __draw(self, state:dict):\n _, ax = plt.subplots()\n ax.set_axis_off()\n tb = Table(ax, bbox=[0,0,1,1])\n\n width = height = 1.0 /9 \n\n\n for key in self.state.keys():\n # Add cells\n i,j = self.__display_table_map[key]\n tb.add_cell(i, j, width, height, text='{}'.format(state[key]), \n loc='center',facecolor= self.__color_map[key])\n\n ax.add_table(tb)\n plt.show()", "def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.table(\n ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs\n )", "def plotly_table():\n model_data = your_choice()\n model_data[\"test_prediction\"] = list(model_data[\"test_prediction\"])\n \n df = pd.DataFrame(model_data[\"test_prediction\"], columns=[\"test_prediction\"])\n for k,v in model_data.items():\n if k != \"test_prediction\":\n df[k] = str(v)\n\n fig = a_libraries.plotly_table(df)\n\n return fig", "def plot_table(timestamps: dict, threadList: list, mList: list) -> None:\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()", "def create_tables(times, accuracies, batch_sizes):\r\n #Get time data\r\n p_cpu_times = list(times[0].values())\r\n p_gpu_times = list(times[1].values())\r\n c_cpu_times = list(times[2].values())\r\n c_gpu_times = list(times[3].values())\r\n\r\n #Get differences in times\r\n p_diff_times = [a - b for a, b in zip(p_cpu_times, p_gpu_times)]\r\n c_diff_times = [a - b for a, b in zip(c_cpu_times, c_gpu_times)]\r\n cpu_diff_times = [a - b for a, b in zip(p_cpu_times, c_cpu_times)]\r\n gpu_diff_times = [a - b for a, b in zip(p_gpu_times, c_gpu_times)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_times,\r\n p_gpu_times,\r\n p_diff_times,\r\n c_cpu_times,\r\n c_gpu_times,\r\n c_diff_times,\r\n cpu_diff_times,\r\n gpu_diff_times]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Time (s)', 'P GPU Time (s)', 'P 
Diff (s)', 'C CPU Time (s)', 'C GPU Time (s)', 'C Diff (s)', 'CPU Diff (s)', 'GPU Diff (s)')\r\n row_colors = plt.cm.BuPu(np.linspace(0, 0.5, n_rows))\r\n col_colors = np.array([192/255,192/255,192/255, 1])\r\n col_colors = np.repeat(col_colors.reshape((1, col_colors.shape[0])), len(columns), axis=0)\r\n\r\n #Create table\r\n plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_time.png')\r\n\r\n\r\n #Get accuracy table\r\n #Get accuracy data\r\n p_cpu_accuracy = list(accuracies[0].values())\r\n p_gpu_accuracy = list(accuracies[1].values())\r\n c_cpu_accuracy = list(accuracies[2].values())\r\n c_gpu_accuracy = list(accuracies[3].values())\r\n\r\n #Get max of each batch\r\n p_cpu_max = [max(x) for x in p_cpu_accuracy]\r\n p_gpu_max = [max(x) for x in p_gpu_accuracy]\r\n c_cpu_max = [max(x) for x in c_cpu_accuracy]\r\n c_gpu_max = [max(x) for x in c_gpu_accuracy]\r\n\r\n #Get differences in accuracies\r\n p_diff_acc = [a - b for a, b in zip(p_cpu_max, p_gpu_max)]\r\n c_diff_acc = [a - b for a, b in zip(c_cpu_max, c_gpu_max)]\r\n cpu_diff_acc = [a - b for a, b in zip(p_cpu_max, c_cpu_max)]\r\n gpu_diff_acc = [a - b for a, b in zip(p_gpu_max, c_gpu_max)]\r\n\r\n #Set data in np array for table\r\n data = np.array([p_cpu_max,\r\n p_gpu_max,\r\n p_diff_acc,\r\n c_cpu_max,\r\n c_gpu_max,\r\n c_diff_acc,\r\n cpu_diff_acc,\r\n gpu_diff_acc]).T\r\n\r\n #Get data in text format\r\n n_rows = data.shape[0]\r\n cell_text = []\r\n for row in range(n_rows):\r\n cell_text.append(['%1.3f' % x for x in data[row]])\r\n \r\n #Get rows and cols for table\r\n columns = ('P CPU Acc (%)', 'P GPU Acc (%)', 'P Diff (%)', 'C CPU Acc (%)', 'C GPU Acc (%)', 'C Diff (%)', 'CPU Diff (%)', 'GPU Diff (%)')\r\n\r\n #Create table\r\n plt.clf()\r\n plt.figure(figsize=(10.8,9.4)).canvas.set_window_title('CPU vs GPU MNIST Neural Network')\r\n plt.table(cellText=cell_text,\r\n rowLabels=batch_sizes,\r\n rowColours=row_colors,\r\n colLabels=columns,\r\n colColours=col_colors,\r\n loc='center')\r\n ax = plt.gca()\r\n ax.axis('off')\r\n plt.savefig('results\\\\figures\\\\table_acc.png')", "def draw(x, y):\n\t\n\t##The length of the 'dott' sequence can be adjusted and the rest of the\n\t\t#drawing will adjust itself after reloading\"\"\"\n\tdott = \" ----- \"\n\tpipe = \"|\"\n\t\n\tprint \"\\n\"\n\tif y: print dott * x + \"\\n\"\n\tfor i in xrange(y):\n\t\t#Though not very readable, the line below is responsible for determining how long\n\t\t\t#one y(vertical) cell should be and printinng as many pipes along the y axis\n\t\t\t #after considering the width of a cell(x-axis unit) \n\t\t#The initial part before the final times sign prints 1 + the number of \n\t\t\t#cells along the x axis (rows) inorder to close last cell \n\t\t\t#the calculation of the spacing of the pipes was determined after testing\n\t\t\t\t#for the best fit\n\t\t\n\t\tprint ((\" \"*(len(dott)-1)).join(iter(pipe*(x+1))) + \"\\n\") * (len(dott) / 2)\n\t\t\n\t\tprint dott*x + \"\\n\"", "def make_plot(x,y):", "def data_table(self, X, y, models_predictions):\n models_predictions = assess_models_names(models_predictions)\n base_color = self.plot_design.base_color_tints[0]\n\n # formatter for y and prediction columns to color and style them separately\n cols = [TableColumn(\n field=y.name,\n 
title=y.name,\n formatter=HTMLTemplateFormatter(template=self._model_column_template.format(color=base_color))\n )]\n\n # predictions\n _ = []\n i = 0\n for model, predictions in models_predictions:\n if i == 0:\n color = self.plot_design.models_color_tuple[0]\n i += 1\n else:\n color = self.plot_design.models_color_tuple[1]\n\n predictions = pd.Series(predictions, name=model).round(6)\n _.append(predictions)\n cols.append(\n TableColumn(\n field=model,\n title=model,\n formatter=HTMLTemplateFormatter(template=self._model_column_template.format(color=color)))\n )\n\n for col in X.columns:\n cols.append(TableColumn(field=col, title=col))\n scores = pd.DataFrame(_).T # by default, wide table is created instead of a long one\n\n # final DataFrame and DataTable\n df = pd.concat([y, scores, X], axis=1)\n source = ColumnDataSource(df)\n dt = DataTable(source=source, columns=cols, editable=False, sizing_mode=\"stretch_width\")\n\n return dt", "def on_scatter_toolbar_table_click(self):\n #print('*** on table click ***')\n row = self.myTableWidget.currentRow()\n if row == -1 or row is None:\n return\n yStat = self.myTableWidget.item(row,0).text()\n self.myParent.replot()", "def visualize_data(y_test, x_test, window_out, num_plots, num_win_ser, cols_y, col_idx):\n \n \n ser_idx = [i for i in range(0, len(y_test), num_win_ser)]\n if num_plots > len(ser_idx):\n print(\"Too many plots, reduce the mumber\")\n else:\n indx = ser_idx[0:num_plots]\n days = range(num_win_ser)\n for idx in indx:\n CR = x_test[idx][0][3]\n #pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]\n true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]\n \n plt.title(\"Y_True, CR: \"+ str(CR))\n plt.xlabel('Days')\n plt.ylabel(cols_y[col_idx])\n \n #plt.plot(days, pred, label = 'Pred')\n plt.plot(days, true, label = 'True')\n \n plt.legend()\n plt.show()", "def DrawBarGraph(connection, table_name, y_axis_field, x_axis_values,\n x_axis_field, arrangement):\n def GetListsFromDB(x_axis_values, x_axis_field, connection,\n table_name, y_axis_field, category):\n \"\"\"This function returns lists of values of a field from the DB.\n\n The function returns lists of `y_axis_field` for the values corresponding to\n the `x_axis_values` in `x_axis_field`.\n Args:\n x_axis_values: a list of values for which the `y_axis_field` will be\n fetched for.\n x_axis_field: name of the field for x_axis\n connection: the connection to the database\n table_name: name of the table in the database which has the data\n y_axis_field: the name of the column in the table, whose data will be put\n into the list\n category: Direct or Envoy or which category the data belong to\n Returns:\n Returns a list of lists with all the values of `y_axis_field`\n corresponding to `x_axis_values`.\n \"\"\"\n lists = list()\n for x in x_axis_values:\n condition = (\"where {}=\\\"{}\\\" and\"\n \" category=\\\"{}\\\"\").format(x_axis_field, x, category)\n single_list = db_utils.SingleColumnToList(db_utils.GetFieldFromTable(\n connection, table_name, field=y_axis_field, cond=condition))\n if not single_list:\n print(\"{} {} is not found in table for {} results.\".format(\n x_axis_field, x, category))\n single_list = [0]\n\n lists.append(single_list)\n return lists\n\n direct_lists = GetListsFromDB(x_axis_values, x_axis_field, connection,\n table_name, y_axis_field,\n \"direct-{}\".format(arrangement))\n envoy_lists = GetListsFromDB(x_axis_values, x_axis_field, connection,\n table_name, y_axis_field,\n \"envoy-{}\".format(arrangement))\n\n def 
GetMeansAndStdsFromList(lists):\n \"\"\"This function returns the means and standard deviation of lists.\n\n Args:\n lists: A list of lists. Each list inside the top-level list consists\n of a sample for a given variable that summary stats will be computed on.\n Returns:\n A pair of list containing means and standard deviations.\n \"\"\"\n means = [np.mean(single_list) for single_list in lists]\n stds = [np.std(single_list) for single_list in lists]\n return means, stds\n\n direct_means, direct_std = GetMeansAndStdsFromList(direct_lists)\n envoy_means, envoy_std = GetMeansAndStdsFromList(envoy_lists)\n\n ind = np.arange(len(x_axis_values))\n width = 0.35\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind, direct_means, width, color=\"r\", yerr=direct_std)\n rects2 = ax.bar(ind + width, envoy_means, width, color=\"y\", yerr=envoy_std)\n\n ax.set_ylabel(y_axis_field)\n ax.set_xlabel(x_axis_field)\n ax.set_xticks(ind + width)\n ax.set_xticklabels(x_axis_values, rotation=\"vertical\", fontsize=8)\n # legend will be placed out of the main graph\n ax.legend((rects1[0], rects2[0]), (\"Direct\", \"Envoy\"),\n loc=\"center left\", bbox_to_anchor=(1, 0.5))\n AutoLabel(rects1, ax)\n AutoLabel(rects2, ax)\n fig.savefig(\"{} {}.png\".format(\n x_axis_field, \",\".join(str(i) for i in x_axis_values)),\n bbox_inches=\"tight\")", "def create_display_data_table():\n\n for ccd in range(0, 10):\n for node in range(0, 4):\n file = 'ccd' + str(ccd) + '_' + str(node)\n infile = data_dir + file\n outfile = web_dir + 'Data/' + file\n\n f = open(infile, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n fo = open(outfile, 'w')\n#\n#--- adding heading\n#\n line = \"#\\n#Date Mn K alpha Al K alpha Ti K alpha Slope Sigma Int Sigma\\n#\\n\"\n fo.write(line)\n for ent in data:\n atemp = re.split('\\s+', ent)\n stime = int(atemp[0])\n#\n#--- converting the date into <mon> <year> form (e.g. 
May 2013)\n#\n ltime = tcnv.axTimeMTA(stime)\n btemp = re.split(':', ltime)\n year = btemp[0]\n [mon, mdate] = tcnv.changeYdateToMonDate(int(year), int(btemp[1]))\n lmon = tcnv.changeMonthFormat(mon)\n line = lmon + ' ' + year \n for j in range(1, len(atemp)):\n line = line + '\\t' + atemp[j]\n\n line = line + '\\n'\n fo.write(line)\n fo.close()", "def paint_cells(self, data):\r\n if len(data) == 0: return\r\n col, row = zip(*data.keys())\r\n colors = tuple(data.values())\r\n if not isinstance(colors[0], Number):\r\n colors = [self.cdict[color] for color in colors] \r\n self.A[row, col] = colors\r\n self.plot()", "def fill_table(self, data):\r\n if len(data) > 0:\r\n if isinstance(data, np.ndarray):\r\n data = data.tolist()\r\n data_rows = len(data)\r\n data_columns = len(data[0])\r\n if data_columns > 0:\r\n self.setRowCount(data_rows)\r\n # We hide the imag part of the complex impedance\r\n self.setColumnCount(data_columns - 1)\r\n for r in range(0, data_rows):\r\n # Update real columns\r\n for c, realc in [(0, 0), (1, 1), (3, 4)]:\r\n item = QTableWidgetItem() \r\n item.setText(str(data[r][realc])) \r\n self.setItem(r, c, item)\r\n # Earth resistance has a hidden column which can have an imaginary number\r\n if data[r][3] != 0.0:\r\n # show complex impedance\r\n item = QTableWidgetItem() \r\n item.setText(str(np.complex(data[r][2], data[r][3])))\r\n self.setItem(r, 2, item)\r\n else:\r\n # show real impedance\r\n item = QTableWidgetItem()\r\n item.setText(str(data[r][2])) \r\n self.setItem(r, 2, item)\r\n # Last Column is a QComboBox to select phasing\r\n phasing = QComboBox()\r\n phasing.addItems([\"Normal\",\"120 degree shift\", \"240 degree shift\"])\r\n phasing.setCurrentIndex(np.real(data[r][5]))\r\n phasing.currentIndexChanged.connect(self.phasing_signal(phasing, r, 5))\r\n self.setCellWidget(r, 4, phasing)", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def outputTable(xpoints, ypoints, ypointse, outfile='completeness.txt', magType='Instrumental'):\r\n fout = open(outfile, 'w')\r\n fout.write('# '+magType+'F814W fc fce\\n')\r\n\r\n for i,_ in enumerate(xpoints):\r\n fout.write(str(xpoints[i])+' '+\"%1.3f\" %ypoints[i]+' '+\"%1.3f\" %ypointse[i]+'\\n')\r\n fout.close()", "def __str__(self):\n table_string = ''\n values = [x * y for x in range(1, self.x + 1)\n for y in range(1, self.y + 1)\n ]\n for value in range(1, len(values) + 1):\n if value % self.x == 0:\n table_string += f'{values[value - 1]}\\n'\n else:\n table_string += f'{values[value - 1]} | '\n return table_string", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n 
print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def __update_table(self):\n\n headlines = [\"\", ]\n headlines += range(1, + 1)\n headlines = [\" \"] + [str(x) for x in range(1, self.find_table_length() + 1)]\n self.__main_display_table.config(columns=headlines)\n\n for headline in headlines:\n self.__main_display_table.heading(headline, text=headline)\n self.__main_display_table.column(headline, anchor=\"center\", width=35)\n\n data = self.__display_buses_location()\n\n for i in self.__main_display_table.get_children():\n # deletes all the data in the chart\n self.__main_display_table.delete(i)\n for line in data:\n # inserts new data into the chart, goes line by line\n self.__main_display_table.insert(\"\", END, values=line)", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n df.drop(df.columns[6], axis=1, inplace=True)\n\n if len(context) > 3:\n context = context.lower().capitalize()\n df = df.loc[df[\"Country,Other\"] == context]\n if 4 > len(context) > 1:\n context = context.upper()\n df = df.loc[df[\"Country,Other\"] == context]\n if len(context) <= 1:\n df = df[1:]\n\n C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n T_Recovered = df[\"TotalRecovered\"].head(n=10).values.tolist()\n T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n x = np.arange(len(C_Names))\n width = 0.20\n\n fig, ax = plt.subplots()\n\n ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color=\"Blue\")\n ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n if len(context) > 1:\n ax.set_title(\"{}'s Situation\".format(context))\n else:\n ax.set_title(\"World's Top10 Situation\")\n\n ax.set_xticks(x)\n ax.set_xticklabels(C_Names)\n ax.legend()\n plt.ticklabel_format(style='plain', axis=\"y\")\n fig.set_size_inches(18.5, 10.5)\n fig.tight_layout()\n plt.grid()\n\n if len(context) > 1:\n font1 = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'bold',\n 'size': 20}\n font2 = {'family': 'serif',\n 'color': 'red',\n 'weight': 'normal',\n 'size': 20}\n font3 = {'family': 'serif',\n 'color': 'green',\n 'weight': 'normal',\n 'size': 20}\n font4 = {'family': 'serif',\n 'color': 'orange',\n 'weight': 'normal',\n 'size': 20}\n\n # bbox=dict(facecolor='black', alpha=0.5)\n plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n # plt.savefig('corona.png') # Uncomment it to save the figure\n plt.show()", "def test_2d_plot(self):\n db = 
pd.HDFStore('test.h5')\n df_iv = db['iv']\n dates = df_iv[df_iv['dte'] == 30]['date']\n impl_vols = df_iv[df_iv['dte'] == 30]['impl_vol']\n db.close()\n\n print df_iv.sort_values('impl_vol').head()\n\n plt.plot(dates, impl_vols)\n plt.xlabel('date')\n plt.ylabel('impl_vols')\n plt.show()", "def plottable(scores):\n\n y = []\n x = []\n for key in sorted(scores.keys()):\n if sum(scores[key]) != 0:\n y.append(math.log10(sum(scores[key]) / len(scores[key])))\n\n return y", "def DrawTimeSeriesGraph(connection, table_name, y_axis_field, time,\n arrangement):\n def GetListFromDB(time, category, y_axis_field, connection, table_name):\n condition = (\"where time_of_entry >= \\\"{}\\\" and\"\n \" category=\\\"{}\\\" Group By RunID \"\n \"Order By time_of_entry\").format(\n time, category)\n single_list = db_utils.GetFieldFromTable(\n connection, table_name,\n field=\"AVG({}), STDDEV({}), time_of_entry, RunID\".format(\n y_axis_field, y_axis_field),\n cond=condition)\n if not single_list:\n print(\"Values are not found in table for category {}.\".format(\n category))\n return None\n\n return single_list\n\n direct_list = GetListFromDB(time, \"direct-{}\".format(arrangement),\n y_axis_field, connection, table_name)\n envoy_list = GetListFromDB(time, \"envoy-{}\".format(arrangement),\n y_axis_field, connection, table_name)\n\n if direct_list:\n direct_means, direct_std = zip(*direct_list)[:2]\n direct_times = [v[2].time().strftime(\"%H:%M\") if not i % 2 else \"\"\n for i, v in enumerate(direct_list)]\n else:\n raise ShowGraphError(\"Direct's data not found for time-series graph.\")\n\n if envoy_list:\n envoy_means, envoy_std = zip(*envoy_list)[:2]\n # time is not needed again but if needed, it can be taken from here\n # envoy_times = [v[2] for v in envoy_list]\n else:\n raise ShowGraphError(\"Envoy's data not found for time-series graph.\")\n\n ind = np.arange(len(direct_times))\n fig, ax = plt.subplots()\n rects1 = ax.errorbar(ind, direct_means, color=\"r\", yerr=direct_std)\n rects2 = ax.errorbar(ind, envoy_means, color=\"y\", yerr=envoy_std)\n\n ax.set_ylabel(y_axis_field)\n ax.set_xlabel(\"time\")\n ax.set_xticks(ind)\n ax.set_xticklabels(direct_times, rotation=\"vertical\", fontsize=8)\n ax.legend((rects1[0], rects2[0]), (\"Direct\", \"Envoy\"),\n loc=\"center left\", bbox_to_anchor=(1, 0.5))\n\n # Helper function to put standard deviation as labels inside the graph\n # data points\n def PutStdDevOnGraph(ax, rects, stddev):\n for i, num in enumerate(rects[0].get_xydata()):\n ax.text(num[0], 1.05*num[1],\n \"%d%%\" % int(100.0*stddev[i]/(1.0*num[1])),\n ha=\"center\", va=\"bottom\", fontsize=8)\n\n PutStdDevOnGraph(ax, rects1, direct_std)\n PutStdDevOnGraph(ax, rects2, envoy_std)\n\n fig.savefig(\"Time-{}-{}.png\".format(time, arrangement),\n bbox_inches=\"tight\")", "def table_example():\n\n print(\"\\nExample making a new table from scratch:\\n\")\n # Make a new (empty) table object\n tbl = table(\"A table with random data\")\n # Add three columns called \"x\", \"x^2\" and \"1/x\"\n tbl.addcolumn(\"x\")\n tbl.addcolumn(\"x^2\")\n tbl.addcolumn(\"1/x\")\n # Add some rows of data\n for i in range(0, 10):\n row = dict()\n row[\"x\"] = i\n row[\"x^2\"] = i * i\n if i != 0:\n row[\"1/x\"] = 1.0 / float(i)\n else:\n row[\"1/x\"] = \"?\"\n tbl.add_data(row)\n # Define some graphs\n tbl.definegraph(\"Y = X(squared)\", (\"x\", \"x^2\"))\n tbl.definegraph(\"Y = 1/X\", (\"x\", \"1/x\"))\n tbl.definegraph(\"All data\", (\"x\", \"x^2\", \"1/x\"))\n # Print out the data as a simple \"table\" and in loggraph 
markup\n print(tbl.show())\n print(tbl.loggraph())", "def create_curve(data_tab, state):\n global width, prev_index, min_temp, max_temp, max_humid, min_humid\n\n def min_max(arr, arr_size):\n \"\"\"\n Helper to get the min and max of the tab\n \"\"\"\n max_t = arr[0]\n min_t = arr[0]\n for i in range(arr_size):\n if arr[i] > max_t:\n max_t = arr[i]\n if arr[i] < min_t:\n min_t = arr[i]\n return min_t, max_t\n\n # The max difference between two temp; if greater than 8, then we need to move vertically\n min_data, max_data = min_max(data_tab, len(data_tab))\n min_max_diff = max(8, max_data - min_data)\n\n # Update min/max values of each curve\n if state == \"temp\":\n min_temp = min(min_data, min_temp)\n max_temp = max(max_data, max_temp)\n elif state == \"humid\":\n min_humid = min(min_data, min_humid)\n max_humid = max(max_data, max_humid)\n\n width = len(data_tab)\n\n normalized_data = data_tab.copy()\n\n for i in range(len(data_tab)):\n normalized_data[i] = ((data_tab[i] - min_data)*7) / min_max_diff\n\n full_data_tab = [[0 for x in range(8)] for y in range(width)]\n\n # The first data that we collected is gonna be centered on the y-axis\n base_data = normalized_data[0]\n\n # Change the base_index depending on max variation of temp\n base_index = 7 - round(base_data)\n\n # Records value for when we change displayed_data\n prev_index = -1\n for i in range(width):\n diff = round(normalized_data[i] - base_data)\n curr_index = base_index - diff\n full_data_tab[i][curr_index] = 1\n\n # COMMENT NEXT FULL BLOCK TO REMOVE VERTICAL PIXELS\n if i > 0:\n delta_index = curr_index - prev_index\n if delta_index > 1:\n for j in range(prev_index + 1, curr_index):\n full_data_tab[i][j] = 1\n if delta_index < -1:\n for j in range(curr_index + 1, prev_index):\n full_data_tab[i][j] = 1\n prev_index = curr_index\n # END OF BLOCK TO COMMENT\n\n\n return full_data_tab", "def plot(self):\n pass", "def generate_table(self, rows):\n ...", "def plot_data_stats(data_dict, data_bxtxn, data_dt):\n print(onp.mean(onp.sum(data_bxtxn, axis=1)), \"spikes/second\")\n f = plt.figure(figsize=(12,4))\n plt.subplot(141)\n plt.hist(onp.mean(data_bxtxn, axis=1).ravel()/data_dt);\n plt.xlabel('spikes / sec')\n plt.subplot(142)\n plt.imshow(data_dict['hiddens'][0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('Sample trial rates')\n plt.subplot(143);\n plt.imshow(data_bxtxn[0,:,:].T)\n plt.xlabel('time')\n plt.ylabel('neuron #')\n plt.title('spikes')\n plt.subplot(144)\n plt.stem(onp.mean(onp.sum(data_bxtxn, axis=1), axis=0));\n plt.xlabel('neuron #')\n plt.ylabel('spikes / sec');\n return f", "def display(self):\n\n if self.points is None:\n data = [\"No data for selected.\"]\n nrows = 0\n ncols = 0\n else:\n nrows = self.points.npoints\n colnames = self.points.axes.keys() + self.points.fields.keys()\n ncols = len(colnames)\n\n self.setRowCount(nrows)\n self.setColumnCount(ncols)\n colnames = self.points.axes.keys() + self.points.fields.keys()\n self.setHorizontalHeaderLabels(colnames)\n\n for i in xrange(nrows):\n # Set each cell to be a QTableWidgetItem from _process_row method\n for j, name in enumerate(colnames):\n if name in self.points.axes:\n item = QtGui.QTableWidgetItem(\n \"%8.3f\" % self.points.axes[name]['data'][i])\n else:\n item = QtGui.QTableWidgetItem(\n \"%8.3f\" % self.points.fields[name]['data'][i])\n\n item.setBackgroundColor = QtGui.QColor(self.bgcolor)\n item.setTextColor = QtGui.QColor(self.textcolor)\n self.setItem(i, j, item)\n\n # Format column width\n 
self.resizeColumnsToContents()\n\n return", "def drawTable(listOfWord, listOfFrequency):\r\n\r\n\tprint(\"Distribusi frekuensi kata: \")\t\t\t\t # judul di atas tabel\r\n\tprint('-' * 40)\r\n\tprint('{:3s} {:25s} {:10s}'.format('No.', 'Kata', 'Frekuensi'))\r\n\tprint('-' * 40)\r\n\r\n\tnumber = 0\t\t\t\t\t\t\t\t# penomoran poin di dalam tabel\r\n\tindexCounter = 0\t\t\t\t\t\t\t\t\t\r\n\tfor word in listOfWord:\t\t\t\t\t\t\t# mencetak isi tabel\r\n\t\tnumber += 1\r\n\t\tprint('{:3d} {:26s} {:<9d}'.format(number, word, listOfFrequency[indexCounter]))\r\n\t\tindexCounter += 1\r\n\r\n\tprint('-' * 40)", "def table(self, L, R, n):\n s = \"\"\n import numpy as np\n for x in np.linspace(L, R, n):\n y = self(x)\n s += \"%12g %12g\\n\" %(x,y)\n return s", "def table_plot(true4_found4_corr, true4_found4_incorr, true4_found3,\n true3_found4, true3_found3, savename=None):\n \n # Prepare plot on which to place table\n _, ax = plt.subplots()\n plt.xlim(-0.1,5.1)\n plt.ylim(-0.1,3.7)\n ax.axis('off')\n\n n_events = sum([true4_found4_corr, true4_found4_incorr, true4_found3,\n true3_found4, true3_found3])\n\n n_col_1 = sum([true4_found4_corr, true4_found4_incorr, true4_found3])\n n_col_2 = sum([true3_found4, true3_found3])\n\n n_row_1 = sum([true4_found4_corr, true4_found4_incorr, true3_found4])\n n_row_2 = sum([true4_found3, true3_found3])\n \n if n_col_1 != 0:\n true4_found4_corr_pc = true4_found4_corr / n_col_1 * 100\n true4_found4_incorr_pc = true4_found4_incorr / n_col_1 * 100\n true4_found3_pc = true4_found3 / n_col_1 * 100\n else:\n true4_found4_corr_pc = 0\n true4_found4_incorr_pc = 0\n true4_found3_pc = 0\n if n_col_2 != 0:\n true3_found4_pc = true3_found4 / n_col_2 * 100\n true3_found3_pc = true3_found3 / n_col_2 * 100\n else:\n true3_found4_pc = 0\n true3_found3_pc = 0\n\n # add a whole bunch of squares and text\n ax.text(0.5,1, \"4th Jet\\nReco\", fontsize=18, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.add_patch(patches.Rectangle((0,0),1,2,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(1.5,1+1/3, \"4th jet\\nfound\", fontsize=13, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(1.5,1, f\"({n_row_1:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((1,0),1,1-1/3,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(3-0.05,1/3, f\"{true4_found3_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(3-0.05,1/9, f\"({true4_found3:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,0),2-0.1,1-1/3,linewidth=1,edgecolor='#262626',facecolor='#ffff66'))\n\n ax.text(4.45,1/3, f\"{true3_found3_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(4.45,1/9, f\"({true3_found3:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((4-0.1,0),1+0.1,1-1/3,linewidth=1,edgecolor='#262626',facecolor='#00ff66'))\n\n ax.text(1.5,0.4, \"No 4th jet\\nfound\", fontsize=13, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(1.5,1/9, f\"({n_row_2:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n 
ax.add_patch(patches.Rectangle((1,1-1/3),1,1+1/3,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(2.5,1+2/3, \"Correct\\n4th jet\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,1-1/3),1,0.5*(1+1/3),linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(2.5,1, \"Incorrect\\n4th jet\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,1-1/3+0.5*(1+1/3)),1,0.5*(1+1/3),linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(3.45,1+2/3, f\"{true4_found4_corr_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(3.45,1+2/3-2/9, f\"({true4_found4_corr:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((3,1-1/3),1-0.1,0.5*(1+1/3),linewidth=1,edgecolor='k',facecolor='#ff6666'))\n\n ax.text(3.45,1, f\"{true4_found4_incorr_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(3.45,1-2/9, f\"({true4_found4_incorr:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((3,1-1/3+0.5*(1+1/3)),1-0.1,0.5*(1+1/3),linewidth=1,edgecolor='#262626',facecolor='#00ff66'))\n\n ax.text(4.45,1+1/3, f\"{true3_found4_pc:.1f}%\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.text(4.45,1+1/3-2/9, f\"({true3_found4:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((4-0.1,1-1/3),1+0.1,1+1/3,linewidth=1,edgecolor='#262626',facecolor='#ff6666'))\n\n ax.text(3,2.375, \"4th tag exists\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(3,2.375-2/9, f\"({n_col_1:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,2),2-0.1,0.75,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(4.45,2.375, \"No 4th tag\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n ax.text(4.45,2.375-2/9, f\"({n_col_2:.0f})\", fontsize=10, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((4-0.1,2),1+0.1,0.75,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n ax.text(3.5,3.1, \"Truth-Matching\", fontsize=18, verticalalignment='center', horizontalalignment='center', fontweight='heavy')\n\n ax.text(1,2.375, f\"(# events={n_events:.0f})\", fontsize=14, verticalalignment='center', horizontalalignment='center', fontweight='normal')\n ax.add_patch(patches.Rectangle((2,2+0.75),3,0.75,linewidth=1,edgecolor='#262626',facecolor='w'))\n\n # format and show/save\n plt.tight_layout()\n if savename:\n plt.savefig(f\"table_{savename}.png\", dpi=300)\n plt.show()", "def _gen_table_style_lines(self):\n yield '.heatmap {border: none; border-collapse: collapse; border-spacing: 0}'\n yield '.heatmap td {padding: 0; margin: 0; font-family: monospace;}'", "def draw_result(result, x_label, y_label, type='line'):\n # The default point marker and color\n POINT_MARKER = {'SPT': 'o', 'ST': 'v',\n 'WSPT': 's', 'WST': '*',\n 'BST': 'p', 'BBSRT': 'x',\n 'BBST': 'D'}\n POINT_COLOR = {'SPT': '#c3637f', 'ST': '#eb8773',\n 'WSPT': '#f4b861', 'WST': 
'#d9ea70',\n 'BST': '#81d2b4', 'BBSRT': '#5bc0d5',\n 'BBST': '#70acf6'}\n # The figure size\n plt.figure(figsize=(9, 6))\n plt.rcParams['font.sans-serif'] = 'SimSun'\n # Check the figure type\n if type == 'line':\n # Draw the line figure\n for key in result:\n plt.plot(*zip(*sorted(result[key].items())), label=key,\n color=POINT_COLOR[key], marker=POINT_MARKER[key])\n\n elif type == 'bar':\n # Draw the bar figure\n # Get the x values\n x_value = list(result['SPT'].keys())\n # Compute the appropriate width\n width = (x_value[1] - x_value[0]) / 8\n # Compute the offset\n offset = [i - (len(result) - 1) / 2 for i in range(len(result))]\n index = 0\n for key in result:\n # Compute the x value of each result\n x = list(result[key].keys())\n for i in range(len(x)):\n # The origin value plus the offset value\n x[i] += offset[index] * width\n plt.bar(x, list(result[key].values()), width=width,\n label=key, color=POINT_COLOR[key])\n index += 1\n\n # Set the y line\n plt.grid(axis='y')\n # Set the legend\n plt.legend(bbox_to_anchor=(1.05, 0.4), loc=3, borderaxespad=0)\n # Set x and y ticks\n plt.xticks(fontsize=15)\n plt.yticks(fontsize=15)\n # Set x and y labels\n plt.xlabel(x_label, fontsize=20)\n plt.ylabel(y_label, fontsize=20)\n # Show the figure\n plt.show()", "def table(self, L, R, n):\n s = \"\"\n for x in np.linspace(L, R, n):\n y = self(x)\n s += \"%12g %12g\\n\" % (x, y)\n return s", "def visualize_data(total_rewards):\n\n x_values = arange(0, len(total_rewards), 1)\n y_values = total_rewards\n plot(x_values, y_values)\n xlabel('episodes')\n ylabel('cumulative rewards')\n title('Reward by Episode')\n grid(True)\n show()", "def table(self, L, R, n):\n s =''\n import numpy as np\n for x in np.linspace(L, R, n):\n y = self(x)\n s += '%12g %12g\\n' % (x, y)\n return s", "def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)", "def setTable(self):\n if not self.outvar or self.data==None:\n return\n\n self.table.setColumnCount(len(self.data.domain.attributes) + (self.data.domain.classVar != None) + len(self.predictors))\n self.table.setRowCount(len(self.data))\n \n print self.table.rowCount(), len(self.data.domain.attributes), (self.data.domain.classVar != None), len(self.predictors)\n\n # HEADER: set the header (attribute names)\n## for col in range(len(self.data.domain.attributes)):\n## self.header.setLabel(col, self.data.domain.attributes[col].name)\n labels = [attr.name for attr in self.data.domain.variables] + [c.name for c in self.predictors.values()]\n self.table.setHorizontalHeaderLabels(labels)\n## col = len(self.data.domain.attributes)\n## if self.data.domain.classVar != None:\n## self.header.setLabel(col, self.data.domain.classVar.name)\n## col += 1\n## for (i,c) in enumerate(self.predictors.values()):\n## self.header.setLabel(col+i, c.name)\n\n # ATTRIBUTE VALUES: set the contents of the table (values of attributes), data first\n for i in range(len(self.data)):\n for j in range(len(self.data.domain.attributes)):\n## self.table.setText(i, j, str(self.data[i][j]))\n self.table.setItem(i, j, QTableWidgetItem(str(self.data[i][j])))\n col = len(self.data.domain.attributes)\n\n # TRUE CLASS: set the contents of the table (values of attributes), data first\n self.classifications = [[]] * len(self.data)\n if self.data.domain.classVar:\n for (i, d) in enumerate(self.data):\n c = d.getclass()\n item = colorItem(str(c))\n self.table.setItem(i, col, item)\n self.classifications[i] = [c]\n col += 1\n\n## for i in 
range(col):\n## self.table.adjustColumn(i)\n\n # include predictions, handle show/hide columns\n self.updateTableOutcomes()\n self.updateAttributes()\n self.updateTrueClass()\n self.table.show()", "def ascii_table(self, tablefmt=\"pipe\"):\n methods = self.methods\n xvalues = self.xvalues\n plot_matrix = self.plot_matrix\n\n import tabulate\n # https://pypi.python.org/pypi/tabulate\n aug_table = np.hstack((np.array(methods)[:, np.newaxis], plot_matrix))\n return tabulate.tabulate(aug_table, xvalues, tablefmt=tablefmt)", "def _make_tables(self, df):\n # Time table:\n time_keys = ['time', 'endtime', 'event_number_nv']\n self.df_event_time = df.loc[:, time_keys]\n\n # Properties tables:\n pos_keys = ['angle', 'pos_x', 'pos_x_spread', 'pos_y',\n 'pos_y_spread', 'pos_z', 'pos_z_spread']\n self.df_event_position = df.loc[:, pos_keys]\n\n keys = df.columns.values\n keys = [k for k in keys if k not in time_keys + pos_keys]\n self.df_event_properties = df.loc[:, keys]\n\n # Table panels:\n index = self.evt_sel_slid.value\n self.time_table = pn.panel(self.df_event_time.loc[index],\n )\n self.pos_table = pn.panel(self.df_event_position.loc[index:index, :],\n sizing_mode='scale_width')\n\n self.prop_table = pn.panel(self.df_event_properties.loc[index:index, :],\n sizing_mode='scale_width')", "async def stat_table(self, data):\n\n table = \"\"\n table += tabulate([data[\"stats\"][1]], data[\"stats\"][0], tablefmt=\"grid\") + \"\\n\"\n table += tabulate([data[\"resist\"][1]], data[\"resist\"][0], tablefmt=\"grid\") + \"\\n\"\n if data[\"inherits\"] and data[\"inherits\"][0]:\n table += tabulate([data[\"inherits\"][1]], data[\"inherits\"][0], tablefmt=\"grid\") + \"\\n\"\n \n skills = tabulate(data[\"skills\"][1], data[\"skills\"][0], tablefmt=\"grid\")\n if len(skills) > 2000:\n counter = 0\n split_skills = []\n skills = skills.split(\"\\n\")\n skills = [\"\\n\".join(skills[8*i:min(8*(i+1)+1, len(skills))])\n for i in range(int(len(skills) / 8))]\n else:\n skills = [skills]\n\n results = [\"```\\n\" + table[:-1] + \"\\n```\"]\n for skill in skills:\n results.append(\"```\\n\" + skill + \"\\n```\")\n return results", "def initPlotY(self):\n\n self.plotFineY = [np.array([]) for i in range(len(self.plotFineX))]", "def plot_table(self, axn: str = \"table\", df: Optional[DataFrame] = None) -> None:\n if self.axes is None:\n axs = self.initialize_figure(mosaic=[[axn]], figsize=(6, 4), return_ax=True)\n else:\n axs = self.axes\n\n if df is None:\n df = DataFrame([\"empty\"])\n\n axs[axn].table(\n df.values.tolist(),\n colLabels=df.columns,\n colColours=[(1.0, 1.0, 1.0, 1.0)]\n + [self.cmap(i, alpha=0.75) for i in range(len(df.columns) - 1)],\n bbox=(0.0, 0.0, 1.0, 1.0),\n )\n axs[axn].set_xticks([])\n axs[axn].set_yticks([])\n return axs[axn]", "def table(self, L, R, n=10):\n s = \"\"\n for x in np.linspace(L, R, n):\n y = self(x)\n s += f\"({x:.2f}, {y:.2f})\\n\"\n return s", "def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()", "def display(self):\n covtable = self.covtable\n covtable.clearContents()\n covtable.setRowCount(0)\n for entry in self.config['table']:\n row_position = covtable.rowCount()\n covtable.insertRow(row_position)\n covtable.setItem(row_position, 0, sit.PercentWidgetItem(entry[0]))\n covtable.setItem(row_position, 1, sit.QTableWidgetItem(entry[1]))\n covtable.setItem(row_position, 2, sit.HexWidgetItem(entry[2]))\n covtable.setItem(row_position, 3, sit.RatioWidgetItem(entry[3]))\n covtable.setItem(row_position, 4, 
sit.centered_text(entry[4]))", "def callback_tablechanged(table_data):\n return {\n \"data\": [\n {\"x\": [row[\"x0\"], row[\"x1\"]], \"y\": [row[\"y0\"], row[\"y1\"]], \"type\": \"line\"}\n for row in table_data\n ]\n }", "def plot_settings_table(settings, table_nr=1, plot_out=None):\n\n keys = settings.keys()\n\n data_matrix_1 = [keys[:len(keys) / 3], []]\n for key in data_matrix_1[0]:\n data_matrix_1[1].append(str(settings[key]))\n\n data_matrix_2 = [keys[len(keys) / 3:2 * len(keys) / 3], []]\n for key in data_matrix_2[0]:\n data_matrix_2[1].append(str(settings[key]))\n\n data_matrix_3 = [keys[2 * len(keys) / 3:len(keys)], []]\n for key in data_matrix_3[0]:\n data_matrix_3[1].append(str(settings[key]))\n\n data = [data_matrix_1, data_matrix_2, data_matrix_3]\n\n nr_columns = len(data[table_nr - 1][0])\n plot = {'data': [{'colorscale': [[0, '#00083e'], [0.5, '#ededee'], [1, '#ffffff']],\n 'hoverinfo': 'none',\n 'opacity': 0.75,\n 'showscale': False,\n 'type': 'heatmap',\n 'z': [[0, 0.5] for row in range(nr_columns)]\n }],\n 'layout': {\n 'annotations': [],\n 'yaxis1': {'autorange': 'reversed',\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': ''\n },\n 'xaxis1': {\n 'showgrid': False,\n 'showticklabels': False,\n 'zeroline': False,\n 'ticks': '',\n 'range': [0, 1]\n\n },\n 'title': \" \"\n }\n }\n\n # heading\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell].update({'text': data[table_nr - 1][0][table_cell]})\n plot['layout']['annotations'][table_cell].update({'font': {\n 'color': '#ffffff',\n 'size': 15}\n })\n plot['layout']['annotations'][table_cell].update({'y': table_cell})\n plot['layout']['annotations'][table_cell].update({'x': 0.1})\n plot['layout']['annotations'][table_cell].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell].update({'align': 'center'})\n plot['layout']['annotations'][table_cell].update({'xanchor': 'left'})\n plot['layout']['annotations'][table_cell].update({'showarrow': False})\n\n # content\n for table_cell in range(nr_columns):\n plot['layout']['annotations'].append({})\n plot['layout']['annotations'][table_cell + nr_columns].update({'text': data[table_nr - 1][1][table_cell]})\n plot['layout']['annotations'][table_cell + nr_columns].update({'x': 0.75})\n plot['layout']['annotations'][table_cell + nr_columns].update({'y': table_cell})\n plot['layout']['annotations'][table_cell + nr_columns].update({'xref': 'x1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'yref': 'y1'})\n plot['layout']['annotations'][table_cell + nr_columns].update({'showarrow': False})\n\n if plot_out is not None:\n plotly_plot(plot, filename=settings['plot_out'], auto_open=False)\n else:\n return plot", "def chart_data_table(self):\n return self.container['chart_data_table']", "def show_data(self):\n\n self.area_canvas.axes.cla()\n self.draw_scatterplot(self.scatter_canvas, 'x [µm]', 'y [µm]', self.p_inputs['flip y-axis'].isChecked())\n self.draw_hist(self.area_canvas, 'area', 'cluster area [µm²]', 'number of clusters')\n self.draw_hist(self.number_canvas, 'nclusters', 'number of cluster', 'number of regions')\n self.draw_hist(self.density_canvas, 'density', 'cluster density [µm⁻²]', 'number of clusters')\n self.draw_hist(self.percentage_canvas, 'pclustered', 'percentage clustered',\n 'number of regions')\n self.draw_hist(self.ratio_canvas, 'reldensity', 'relative density 
clusters/background',\n 'number of regions')", "def create_table(self, title: str, columns: List[str], data: Dict[str, str]) -> None:\n table = Table(title=title, box=box.SIMPLE)\n for column in columns:\n table.add_column(column, justify=\"right\", style=\"bright_yellow\", no_wrap=True)\n\n for model, percentage in data.items():\n table.add_row(model, percentage)\n\n console = Console()\n console.print(table)", "def render_mpl_table(data,\n col_width=15,\n row_height=0.625,\n font_size=12,\n header_color='#40466e',\n row_colors=['#f1f1f2', 'w'],\n edge_color='w',\n bbox=[0, 0, 1, 1],\n header_columns=0,\n ax=None,\n **kwargs):\n\n # the np.array added to size is the main determinant for column dimensions\n if ax is None:\n size = (np.array(data.shape[::-1]) + np.array([2, 1])) * np.array(\n [col_width, row_height])\n fig, ax = plt.subplots(figsize=size)\n ax.axis('off')\n\n mpl_table = ax.table(cellText=data.values,\n bbox=bbox,\n colLabels=data.columns,\n **kwargs)\n\n mpl_table.auto_set_font_size(False)\n mpl_table.set_fontsize(font_size)\n\n for k, cell in six.iteritems(mpl_table._cells):\n cell.set_edgecolor(edge_color)\n if k[0] == 0 or k[1] < header_columns:\n cell.set_text_props(weight='bold', color='w')\n cell.set_facecolor(header_color)\n else:\n cell.set_facecolor(row_colors[k[0] % len(row_colors)])\n return ax", "def test_to_redo():\n from collections import OrderedDict\n import matplotlib.pyplot as plt\n from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n\n url_base = 'http://172.20.38.50/iop/test_to_redo/dbphone_test_to_redo_'\n year = 2016\n week = 8\n url = '{0}{1}_w{2}.xml'.format(url_base, year, week)\n\n # Build mainfields dictionary\n stats_mainfields = OrderedDict()\n stats_categories = OrderedDict()\n\n tests, tests_cat = get_test_to_redo_stats(url)\n while tests and tests_cat:\n stats_mainfields[week] = tests\n stats_categories[week] = tests_cat\n week += 1\n url = '{0}{1}_w{2}.xml'.format(url_base, year, week)\n\n tests, tests_cat = get_test_to_redo_stats(url)\n\n c_week = week - 1\n weeks = [w for w, _ in stats_categories.iteritems()]\n\n with open(\"./header.html\", \"r\") as header,\\\n open(\"./tests_to_redo.tpl\", \"r\") as tests_to_redo,\\\n open(\"./footer.html\", \"r\") as footer:\n template_html = header.read() + tests_to_redo.read() + \"<br>\" * 10 + footer.read()\n\n for category, value in stats_categories[c_week].iteritems():\n x = weeks\n y = [stats_categories[w][category][0] for w in weeks]\n ax = plt.subplot(111)\n ax.plot(x, y, lw=1)\n\n # set the basic properties\n ax.set_xlabel('Weeks')\n ax.set_ylabel('Tests')\n ax.set_title(\"{} evolution\".format(category[0]))\n xlab = ax.xaxis.get_label()\n ylab = ax.yaxis.get_label()\n xlab.set_style('italic')\n xlab.set_size(10)\n ylab.set_style('italic')\n ylab.set_size(10)\n # set the grid on\n ax.grid('on')\n\n ax.fill_between(x, 0, y, alpha=0.2)\n majorLocator = MultipleLocator(0.5)\n ax.xaxis.set_major_locator(majorLocator)\n\n plt.savefig(\"static/img/{}.svg\".format(category[0]), format='svg')\n plt.close()\n output = template(template_html, stats_mainfields=stats_mainfields, stats_categories=stats_categories, week=c_week)\n return output", "def plot_all_components(X,y, save=True):\r\n n=len(X[0,:])\r\n for i in range(n):\r\n plot_data_component_i(X, y, i, save)", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, 
label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def plot_graph(self) -> None:", "def data_visualization(df):\r\n\r\n # Visualizing the target variable\r\n plt.figure(figsize=(14, 10))\r\n plt.title(\"Count of bike sharing according to dates\")\r\n plt.plot(df['dteday'], df['cnt'])\r\n #plt.show()\r\n plt.savefig(\"Raw data visualization.png\")\r\n\r\n # box plot for visualizing outliers\r\n fig=px.box(df, y=\"cnt\", notched=True,title='Box plot of the count variable')\r\n #fig.show()\r\n plt.savefig(\"Box Plot.png\")\r\n\r\n # point plot for hourly utilization\r\n for column in ['season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday', 'weathersit']:\r\n hist = px.histogram(df, x=column, y='cnt')\r\n hist.show()\r\n plt.savefig(\"Histogram plots for each column.png\")\r\n sns.pointplot(x=df['hr'], y='cnt', data=df);\r\n plt.title(\"Hourly Utilization\")\r\n plt.ylabel(\"Bike Shares\", fontsize=12)\r\n plt.xlabel(\"Hour\", fontsize=12)\r\n plt.savefig(\"Hourly Utilization point plot.png\", dpi=300, bbox_inches='tight')\r\n\r\n # line plot for hourly utilization\r\n for c in ['holiday','season','workingday']:\r\n sns.lineplot(data=df,x='hr',y='cnt',hue=c)\r\n plt.title('Hourly plot vs count')\r\n plt.savefig(\"Hour vs count plot_main features.png\",dpi=300, bbox_inches='tight')\r\n\r\n # point plots for humidity vs count\r\n sns.pointplot(x='hum', y='cnt', data=df)\r\n plt.title(\"Amount of bike shares vs humidity\", fontsize=25)\r\n plt.xlabel(\"Humidity (%)\", fontsize=20)\r\n plt.ylabel('count of bike shares', fontsize=20)\r\n plt.locator_params(axis='x', nbins=10)\r\n plt.savefig(\"Pointplot of humidity vs count.png\",dpi=300, bbox_inches='tight')\r\n\r\n # box plots of whole df\r\n bx=px.box(df, y=\"cnt\")\r\n bx.show()\r\n\r\n # feature correlation plot\r\n corrs = abs(df.corr())\r\n sns.heatmap(corrs, annot=True)\r\n plt.title(\"Feature Correlation\")\r\n plt.savefig(\"Feature_correlation.png\", dpi=300, bbox_inches='tight')\r\n return plt", "def draw_graph(pencil: turtle.Turtle, posx, posy, data_list):\n # Your code here\n data_list = [100, 50, 150, 300, 200, 100, 50, 150, 300, 200.9, 200]\n turtle.setup(1500, 800)\n t.penup()\n t.goto(posx, posy)\n t.pendown()\n draw_bars(turtle, data_list)\n draw_legend(turtle, data_list)\n t.done()", "def charting(lim=2020):\r\n for indic in ['FLR ', 'CRE ', 'TISA', 'SSPI', 'US7 ']:\r\n for c in ['A', 'M', 'P', 'T', 'all']:\r\n # TODO: fix charting for SSPI - it returns three values\r\n data = chart_data(indic, '2018-09-01', 12*5, c, lim=lim).set_index('date').sort_index()\r\n y = ['SP1', 'SP2', 'SP5', 'SSPI'] if indic == 'SSPI' else ['Perc.idv', 'Perc.ids']\r\n data.plot(kind='line', y=y)\r\n plt.xticks(range(len(data)), data.index.tolist(), rotation=30)\r\n plt.xlabel(None)\r\n plt.axhline(y=100, color='r', linestyle='-', label='Individual target')\r\n plt.axhline(y=75, color='b', linestyle='-', label='Industry target')\r\n plt.title(centres[c] + ' ' + indic)\r\n plt.savefig('pic/' + str(lim) + c + indic.strip() + '.png')\r\n logging.info('pic/' + str(lim) + c + indic.strip() + '.png saved')", "def table_top(data, name, 
axs):\n\n # Count\n v_count = []\n for i in name:\n v_col_size = data[i].size\n v_count.append(v_col_size)\n\n # Use built in tex only, no depandancy needed\n sample_count_str = \"samples, \" + r' $n$ '\n\n symbols = pd.DataFrame([sample_count_str])\n val = pd.DataFrame([v_count])\n data = pd.concat([symbols, val], axis=1)\n\n # Get column names out of list\n labels = [\"\"]\n for i in name:\n labels.append(i)\n\n top = axs.table(\n cellText=data.values,\n colLabels=labels,\n loc='center',\n cellLoc=\"center\",\n colLoc='center',\n # xmin, ymin, width, height\n bbox=(0, 0, 1, 1),\n edges=\"\")\n\n table_settings(axs, top)\n\n # As the above table_settings function sets black\n # line on top overwrite that setting\n axs.spines['top'].set_color('white')", "def plotData(X, y):\n plt.figure()\n\n # Find Indices of Positive and Negative Examples\n pos = np.where(y == 1, True, False).flatten()\n neg = np.where(y == 0, True, False).flatten()\n\n # Plot Examples\n plt.plot(X[pos, 0], X[pos, 1], 'k+', linewidth=1, markersize=7)\n plt.plot(X[neg, 0], X[neg, 1], 'ko', color='y', markersize=7)", "def print_table(data):\n for key in sorted(data):\n print \"%s: %s\" % (key.rjust(16), data[key])", "def get_drawdown_table(self, top=5):\n return pf.timeseries.gen_drawdown_table(self.port_rets, top=top)", "def __profile_to_table(data):\n output = [[\"condition\", \"mean\", \"min\", \"max\"]]\n order = data[\"order\"]\n\n for o in order:\n try:\n values = data[\"data\"][o]\n output.append(\n [o, str(mean(values)), str(min(values)), str(max(values))]\n )\n except Exception as e:\n print(e)\n\n return \"\\n\".join([\"\\t\".join(l) for l in output])", "def grid_plot_twitter(proverbs_list, data,dim = (4,4), ylog = False, rt = False): \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0],dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2)\n \n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize = 14)\n fig.text(0.02, 0.5, 'Frequency among all {}-grams on Twitter'.format(len(proverbs_list[0].split())), va='center', rotation='vertical', fontsize = 14)\n \n #loop to create each timeseries plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n ts = data[data.proverb ==proverbs_list[i]]\n ts.date = pd.to_datetime(ts.date, format = '%Y-%m-%d', errors='coerce')\n ts.index = ts.date\n ts = ts.sort_index()\n print(ts)\n ts2 = ts.copy()[['freq_noRT', 'freq']]\n print(ts2)\n ts2 = ts2.rolling(window=30).mean()\n print(ts2)\n\n \n if ylog == False:\n pass\n\n elif ylog == True:\n ax.set_yscale('log') \n\n if rt == False:\n ax.plot(ts.index, ts['freq_noRT'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq_noRT'], alpha = 0.9, color='darkorange') \n \n elif rt ==True:\n ax.plot(ts.index, ts['freq'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq'], alpha = 0.9, color='darkorange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def _gen_table_rows(self):\n row_labels = self._get_padded_row_labels()\n column_labels = self._get_padded_column_labels()\n for row in zip(*column_labels):\n yield ''.join('<td>%s</td>' % c for c in row)\n for label, row_string in 
zip(row_labels, HeatMap._gen_table_rows(self)):\n yield ''.join('<td>%s</td>' % c for c in label) + row_string", "def draw_data(self, num_dat):\n\n raise NotImplementedError", "def helix_triplet_stats (self):\n\n for Value in ['Phi']:\n\n HistogramPlot(np.array(self. values_list(Value, flat=True)), 'myproject/myapp/static/myapp/static/Stats/HelixTriplet/'+Value )\n #zrobic jakies dict coby robilo ranges, uzaleznialo np od zakresu albo od czegos\n\n return", "def plot2D_all(df, sample, sgn, pdf_key):\n\n for xvar in df.columns:\n for yvar in df.columns:\n if xvar!=yvar:\n fig, axs = plt.subplots(figsize=(15, 10))\n cax = plt.hist2d(df[xvar],df[yvar],range=[[df[xvar].min(), df[xvar].max()], [df[yvar].min(), df[yvar].max()]], bins=100,\n norm=mpl.colors.LogNorm(), cmap=plt.cm.viridis)\n\n\n if sgn==1:\n plt.title('Signal candidates ' + sample, fontsize = 25)\n\n if sgn==0:\n plt.title('Background candidates ' + sample, fontsize = 25)\n\n\n plt.xlabel(xvar, fontsize=25)\n plt.ylabel(yvar, fontsize=25)\n\n\n mpl.pyplot.colorbar()\n\n plt.legend(shadow=True,title =str(len(df))+ \" samples\")\n\n fig.tight_layout()\n plt.savefig(pdf_key,format='pdf')\n pdf_key.close()", "def descriptive_table(data, column_name, fig_size=(8, 8)):\n\n # Set up figure dimensions and sub components.\n sheet, axs = plt.subplots(4, 1, figsize=fig_size)\n\n # Heights ratio is based on the number of rows in each\n # table, this relates to the number of statistics each\n # sub table will show.\n gs = gridspec.GridSpec(4, 1, height_ratios=[2, 2, 5, 9])\n\n # Assign all subplots based on figure dimensions.\n ax0 = plt.subplot(gs[0])\n ax1 = plt.subplot(gs[1])\n ax2 = plt.subplot(gs[2])\n ax3 = plt.subplot(gs[3])\n\n title_color = '#9099A2' # Dark grey\n plt.suptitle(\n 'Descriptive Statistics',\n fontsize=16,\n color=title_color,\n x=0.25\n )\n\n table_top(data, column_name, ax0)\n table_central_tend(data, ax1)\n table_disperssion(data, ax2)\n table_distribution(data, ax3)\n\n # Adjust the spacing so the title fits correctly.\n sheet.subplots_adjust(hspace=0.2, top=0.95)", "def plot_dataset(self):\n plt.plot(self.ground_truth, marker='o')\n plt.ylabel('Number of Topics')\n plt.xlabel('Window Number')\n plt.yticks(list(set(self.ground_truth)))\n plt.savefig(os.path.join(self.output_path, 'shift-plot.pdf'))", "def paint_history_chart(self):\r\n\r\n if self.change_type == TYPE_ORDERBOOK:\r\n # erase only the rightmost column to redraw bid/ask and orders\r\n # beause we won't redraw the chart, its only an orderbook change\r\n self.win.vline(0, self.width - 1, \" \", self.height, COLOR_PAIR[\"chart_text\"])\r\n else:\r\n self.win.bkgd(\" \", COLOR_PAIR[\"chart_text\"])\r\n self.win.erase()\r\n\r\n hist = self.gox.history\r\n book = self.gox.orderbook\r\n\r\n self.pmax = 0\r\n self.pmin = 9999999999\r\n\r\n # determine y range\r\n posx = self.width - 2\r\n index = 0\r\n while index < hist.length() and posx >= 0:\r\n candle = hist.candles[index]\r\n if self.pmax < candle.hig:\r\n self.pmax = candle.hig\r\n if self.pmin > candle.low:\r\n self.pmin = candle.low\r\n index += 1\r\n posx -= 1\r\n\r\n if self.pmax == self.pmin:\r\n return\r\n\r\n # paint the candlestick chart.\r\n # We won't paint it if it was triggered from an orderbook change\r\n # signal because that would be redundant and only waste CPU.\r\n # In that case we only repaint the bid/ask markers (see below)\r\n if self.change_type != TYPE_ORDERBOOK:\r\n # paint the candles\r\n posx = self.width - 2\r\n index = 0\r\n while index < hist.length() and posx >= 0:\r\n candle 
= hist.candles[index]\r\n self.paint_candle(posx, candle)\r\n index += 1\r\n posx -= 1\r\n\r\n # paint the y-axis labels\r\n posx = 0\r\n step = self.get_optimal_step(4)\r\n if step:\r\n labelprice = int(self.pmin / step) * step\r\n while not labelprice > self.pmax:\r\n posy = self.price_to_screen(labelprice)\r\n if posy < self.height - 1:\r\n self.paint_y_label(posy, posx, labelprice)\r\n labelprice += step\r\n\r\n # paint bid, ask, own orders\r\n posx = self.width - 1\r\n for order in book.owns:\r\n if self.is_in_range(order.price):\r\n posy = self.price_to_screen(order.price)\r\n if order.status == \"pending\":\r\n self.addch(posy, posx,\r\n ord(\"p\"), COLOR_PAIR[\"order_pending\"])\r\n else:\r\n self.addch(posy, posx,\r\n ord(\"o\"), COLOR_PAIR[\"book_own\"])\r\n\r\n if self.is_in_range(book.bid):\r\n posy = self.price_to_screen(book.bid)\r\n # pylint: disable=E1101\r\n self.addch(posy, posx,\r\n curses.ACS_HLINE, COLOR_PAIR[\"chart_up\"])\r\n\r\n if self.is_in_range(book.ask):\r\n posy = self.price_to_screen(book.ask)\r\n # pylint: disable=E1101\r\n self.addch(posy, posx,\r\n curses.ACS_HLINE, COLOR_PAIR[\"chart_down\"])", "def __init__(self):\n self.tables = pd.DataFrame({\n \"tables\": TABLES,\n \"year\": GREEN_YEARS + YELLOW_YEARS,\n \"color\": [\"green\" for i in GREEN_YEARS] +\n [\"yellow\" for i in YELLOW_YEARS]\n })", "def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", float_format=\"%d\")\n print(\"Outtable: \", outTable)", "def test():\n data1 = resources_vs_time(0.0, 50)\n data2 = resources_vs_time(1.0, 10)\n data3 = resources_vs_time(2.0, 10)\n data4 = resources_vs_time(0.5, 10)\n print data1\n simpleplot.plot_lines(\"Growth\", 600, 600, \"time\", \"total resources\", [data1])", "def plot_table(mat, width=.15, ratio=4):\n vmax = np.abs(mat).max()\n vals = np.around(mat, 2)\n fig = plt.figure()\n ax = fig.add_subplot(111, frameon=False, xticks=[], yticks=[])\n table = plt.table(cellText=vals, colWidths=[width]*vals.shape[1],\n loc='center', cellColours=plt.cm.RdBu_r(\n Normalize(-vmax, vmax)(mat)))\n table.scale(1, ratio)\n return fig", "def test_y(self):\n g = gca()\n lines = g.get_lines() \n self.assertEqual(lines[0].get_ydata().tolist(), [3, 3, 1, 1, 3])", "def visualize(self):\n self.dataFrame.hist()\n plt.show()", "def transaction_plot(ds):\n import seaborn as sns\n import pandas as pd\n df = pd.DataFrame()", "def show_plot(self):\n if self.result is None:\n print('目前無結果。')\n else:\n self.plot_output_result.setRows(len(self.column_name)) # 設定subplot的列數\n for i, column in enumerate(self.column_name): \n self.plot_output_result.canvas.ax[i].plot( # 畫原資料 + 預測結果,以紅色線表示\n range(1, len(self.Data) + 3),\n [i for i in self.Data.loc[:, column]] + [i for i in self.result.loc[:, column]], \n linewidth = 1, color = 'firebrick'\n )\n self.plot_output_result.canvas.ax[i].plot( # 畫原資料,以藍色線表示(使得只有預測曲線是紅的)\n range(1, len(self.Data) + 1), self.Data.loc[:, column], color = 'steelblue'\n )\n self.plot_output_result.canvas.ax[i].fill_between( # 畫信賴區間的背景\n range(len(self.Data), len(self.Data) + 3), \n [self.Data.loc[:, column].values[-1]] + [i for i in self.result.loc[:, column + '_LB']], \n [self.Data.loc[:, column].values[-1]] + [i for i in self.result.loc[:, column + 
'_UB']], \n facecolor = 'salmon', alpha = 0.6, interpolate = True\n )\n self.plot_output_result.canvas.ax[i].set_ylabel(column)\n self.plot_output_result.canvas.ax[-1].set_xlabel('Week', fontproperties = FontProperties(fname = \"SimHei.ttf\", size = 14))\n self.plot_output_result.canvas.figure.subplots_adjust(wspace = 0.1, hspace = 0.5) # 調整子圖間距\n self.plot_output_result.canvas.draw() # 類似plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def print_TRT_cell_histograms(samples_df,cfg_set_tds):\r\n \r\n fig_hist, axes = plt.subplots(3, 2)\r\n fig_hist.set_size_inches(12, 15)\r\n\r\n ## Analyse distribution of ranks\r\n \"\"\"\r\n nw = np.sum(np.logical_and(samples_df[\"RANKr\"]>=12, samples_df[\"RANKr\"]<15))\r\n ng = np.sum(np.logical_and(samples_df[\"RANKr\"]>=15, samples_df[\"RANKr\"]<25))\r\n ny = np.sum(np.logical_and(samples_df[\"RANKr\"]>=25, samples_df[\"RANKr\"]<35))\r\n nr = np.sum(np.logical_and(samples_df[\"RANKr\"]>=35, samples_df[\"RANKr\"]<=40))\r\n print(\" The number of Cells with TRT Rank w is: %s\" % nw)\r\n print(\" The number of Cells with TRT Rank g is: %s\" % ng)\r\n print(\" The number of Cells with TRT Rank y is: %s\" % ny)\r\n print(\" The number of Cells with TRT Rank r is: %s\" % nr)\r\n pw = patches.Rectangle((1.2, 65000), 0.3, 10000, facecolor='w')\r\n pg = patches.Rectangle((1.5, 65000), 1, 10000, facecolor='g')\r\n py = patches.Rectangle((2.5, 65000), 1, 10000, facecolor='y')\r\n pr = patches.Rectangle((3.5, 65000), 0.5, 10000, facecolor='r')\r\n axes[0,0].add_patch(pw); axes[0,0].add_patch(pg); axes[0,0].add_patch(py); axes[0,0].add_patch(pr)\r\n axes[0,0].annotate(str(nw),(1.35,70000),(1.25,90500),ha='center',va='center',color='k',arrowprops={'arrowstyle':'->'}) #,arrowprops={arrowstyle='simple'}\r\n axes[0,0].annotate(str(ng),(2,70000),ha='center',va='center',color='w') \r\n axes[0,0].annotate(str(ny),(3,70000),ha='center',va='center',color='w')\r\n axes[0,0].annotate(str(nr),(3.75,70000),ha='center',va='center',color='w') \r\n \"\"\"\r\n axes[0,0] = plot_band_TRT_col(axes[0,0],samples_df[\"RANKr\"],65000,10000,arrow_start=90500)\r\n samples_df[\"RANKr\"] = samples_df[\"RANKr\"]/10.\r\n samples_df[\"RANKr\"].hist(ax=axes[0,0],bins=np.arange(0,4.25,0.25),facecolor=(.7,.7,.7),alpha=0.75,grid=True)\r\n axes[0,0].set_xlabel(\"TRT rank\")\r\n axes[0,0].set_title(\"TRT Rank Distribution\")\r\n \r\n 
samples_df[\"area\"].hist(ax=axes[0,1],bins=np.arange(0,650,50),facecolor=(.7,.7,.7),alpha=0.75,grid=True)\r\n axes[0,1].set_xlabel(\"Cell Area [km$^2$]\")\r\n axes[0,1].set_title(\"Cell Size Distribution\")\r\n \r\n samples_df[\"date\"] = samples_df[\"date\"].astype(np.datetime64)\r\n \r\n samples_df[\"date\"].groupby(samples_df[\"date\"].dt.month).count().plot(kind=\"bar\",ax=axes[1,0],facecolor=(.7,.7,.7),\r\n alpha=0.75,grid=True)\r\n #axes[1,0].set_xlabel(\"Months\")\r\n axes[1,0].set_xlabel(\"\")\r\n axes[1,0].set_xticklabels([\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\"],rotation=45)\r\n axes[1,0].set_title(\"Monthly Number of Cells\")\r\n\r\n samples_df[\"date\"].groupby([samples_df[\"date\"].dt.month,\r\n samples_df[\"date\"].dt.day]).count().plot(kind=\"bar\",\r\n ax=axes[1,1],facecolor=(.7,.7,.7),alpha=0.75,edgecolor=(.7,.7,.7),grid=True)\r\n axes[1,1].get_xaxis().set_ticks([])\r\n axes[1,1].set_xlabel(\"Days over period\")\r\n axes[1,1].set_title(\"Daily Number of Cells\")\r\n \r\n samples_df[\"date\"].groupby(samples_df[\"date\"]).count().hist(ax=axes[2,0],bins=np.arange(0,150,10),\r\n facecolor=(.7,.7,.7),alpha=0.75,grid=True)\r\n axes[2,0].set_xlabel(\"Number of cells\")\r\n axes[2,0].set_title(\"Number of cells per time step\")\r\n \r\n #samples_df[\"date\"].loc[samples_df[\"RANKr\"]>=1].groupby(samples_df[\"date\"]).count().hist(ax=axes[2,1],bins=np.arange(0,65,5),\r\n # facecolor=(.7,.7,.7),alpha=0.75,grid=True)\r\n #axes[2,1].set_xlabel(\"Number of cells\")\r\n #axes[2,1].set_title(\"Number of cells (TRT Rank >= 1)\\n per time step\")\r\n axes[2,1].axis('off')\r\n \r\n fig_hist.savefig(os.path.join(cfg_set_tds[\"fig_output_path\"],u\"TRT_Histogram.pdf\"))", "def draw_hist(self, canvas, data_type, x_label, y_label):\n\n canvas.axes.cla()\n data_in = self.import_postdata(data_type)\n data_temp = [np.nan if i == np.inf else i for i in data_in]\n data_temp = [i for i in data_temp if (math.isnan(i) == False)]\n if len(data_temp) > 1:\n bw = 2 * np.subtract.reduce(np.percentile(data_temp, [75, 25])) / len(data_temp) ** (1 / 3)\n if bw == 0:\n bw = 1\n canvas.axes.hist(data_temp, bins=np.arange(min(data_temp), max(data_temp) + bw, bw))\n else:\n canvas.axes.hist(data_temp, bins=1)\n canvas.axes.set_ylabel(y_label, fontsize='10')\n canvas.axes.set_xlabel(x_label, fontsize='10')\n canvas.draw()", "def get_plotly_table(per_base_coverage_by_sample, sample_labels, low_coverage_cutoff):\n\n header = [\n \"<b>Samples</b>\",\n \"<b>Mean</b>\",\n \"<b>Median</b>\",\n \"<b>SD</b>\",\n \"<b>Min</b>\",\n \"<b>Max</b>\",\n \"<b>Range</b>\",\n \"<b>Low Coverage Bases</b><br> coverage <= {}\".format(low_coverage_cutoff),\n ]\n\n samples_cell = []\n mean_cell = []\n median_cell = []\n min_cell = []\n max_cell = []\n sd_cell = []\n range_cell = []\n low_coverage_cell = []\n\n ## get descriptive statistics\n for i, cov_list in enumerate(per_base_coverage_by_sample):\n\n samples_cell.append(\"<b>{}</b>\".format(sample_labels[i]))\n mean_cell.append(\"{:.3f}\".format(np.mean(cov_list)))\n median_cell.append(\"{:.3f}\".format(np.median(cov_list)))\n min_cell.append(np.amin(cov_list))\n max_cell.append(np.amax(cov_list))\n sd_cell.append(\"{:.3f}\".format(np.std(cov_list)))\n range_cell.append(np.ptp(cov_list))\n low_coverage_cell.append((np.array(cov_list) <= low_coverage_cutoff).sum())\n\n fig = go.Figure(\n data=[\n go.Table(\n columnwidth=[100, 70, 80, 60, 60, 60, 70, 200],\n header=dict(\n values=header,\n line_color=\"darkslategray\",\n fill_color=\"royalblue\",\n align=[\"left\", 
\"center\"],\n font=dict(color=\"white\", size=15),\n height=40,\n ),\n cells=dict(\n values=[\n samples_cell,\n mean_cell,\n median_cell,\n sd_cell,\n min_cell,\n max_cell,\n range_cell,\n low_coverage_cell,\n ],\n line_color=\"darkslategray\",\n fill=dict(\n color=[\n \"royalblue\",\n \"white\",\n \"white\",\n \"white\",\n \"white\",\n \"white\",\n \"white\",\n \"white\",\n ]\n ),\n align=[\"left\", \"center\"],\n font=dict(\n color=[\n \"white\",\n \"black\",\n \"black\",\n \"black\",\n \"black\",\n \"black\",\n \"black\",\n \"black\",\n ],\n size=[15, 12, 12, 12, 12, 12, 12, 12],\n ),\n height=30,\n ),\n )\n ]\n )\n\n fig.update_layout(title_text=\"Descriptive Statistics Table\")\n\n return (\n fig.to_html(full_html=False, include_plotlyjs=\"cdn\"),\n max(max_cell),\n bool(max(low_coverage_cell) > 0),\n )", "def xyPlot(xPlotFunc\n\t\t\t,yPlotFunc\n\t\t\t,table\n\t\t\t,filterList\n\t\t\t,ax\n\t\t\t,legendLabel=None\n\t\t\t,labelFunc=None\n\t\t\t,title=None\n\t\t\t,commonConstraints=[completed]\n\t\t\t,codeList=['ro--','gx--','b^--','ms--','y*--','ko--','co--','ro:','gx:','b^:','ms:','y*:','ko:','co:','ro-','gx-','b^-','ms-','y*-','ko-','co-']):\n\txys=[]\n\tfor i,constraintList in enumerate(filterList):\t\n\t\txs = [xPlotFunc.func(*x) for x in plotQuery(table,xPlotFunc.cols,constraintList+commonConstraints)]\n\t\tys = [yPlotFunc.func(*y) for y in plotQuery(table,yPlotFunc.cols,constraintList+commonConstraints)]\n\t\tif labelFunc is not None: \n\t\t\tlabel = [labelFunc.func(*l) for l in plotQuery(table,labelFunc.cols,constraintList+commonConstraints)]\n\t\ttry: \n\t\t\txy= sorted(zip(xs,ys)) #order the pairs\n\t\t\tx,y = zip(*xy)\n\t\t\tax.plot(x,y,codeList[i%len(codeList)],label='' if legendLabel is None else legendLabel[i])\n\t\t\tif labelFunc is not None: \n\t\t\t\tfor i in range(len(x)):\tax.annotate(label[i],xy=(x[i],y[i]),fontsize=9)\n\t\texcept ValueError: print \"Warning, no data found for constraint #\"+str(i+1)\n\t\txys.append(xy)\n\tif title is not None: ax.set_title(title)\n\n\tax.set_xlabel(xPlotFunc.axisLabel)\n\tax.set_ylabel(yPlotFunc.axisLabel)\n\t\n\tif legendLabel is not None: \n\t\tlegend = ax.legend(loc='best', shadow=True)\n\t\tlegend.get_frame().set_facecolor('#00FFCC')\n\t\tlegend.draggable()\n\treturn xys", "def plot(self, data_frame):\n self.axes.plot(data_frame, 'o-')\n self.axes.set_ylim(0.0, 200.0)\n self.fig.autofmt_xdate()\n self.draw()", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def make_performance_table(self):\n table = Table()\n table.add_column(\"Classifier\", ratio=25)\n table.add_column(\"Score\", ratio=10, justify=\"center\", no_wrap=True)\n table.add_column(\"Params\", ratio=25, no_wrap=False)\n table.add_column(\"Model ID\",ratio=40, no_wrap=True)\n\n for name, stuff in self.trainer.performance.items():\n score, params, hash_id = stuff\n style = \"bold green\" if name == self.trainer.best_classifier__name else \"\"\n best_one = \" ***\" if name == self.trainer.best_classifier__name else \"\"\n \n table.add_row(\n str(name),\n str(np.round(score, 3)), \n str(params), \n f\"{str(hash_id)}{best_one}\",\n style=style)\n \n return table", "def data_table(\n 
filepath=\"sparkify_data.csv\",\n title=\"Engineered Features Dataframe\",\n ):\n df = read_data_csv(filepath)\n fig = go.Figure(\n data=[\n go.Table(\n header=dict(\n values=list(df.columns), align=\"left\"\n ),\n cells=dict(\n values=[df[col] for col in df.columns],\n align=\"left\",\n ),\n )\n ]\n )\n\n fig.update_layout(title=go.layout.Title(text=title, x=0.5))\n\n return fig", "def init_output_mat(self, y_list):", "def forebears (WFROM,WTO,efrom, eto, g=25):\n \n c.execute(\"\"\"\n SELECT wyear, eyear, count (eyear), wnationality\n FROM clean \n WHERE (eyear IS NOT Null) AND (wyear IS NOT Null)\n AND WYEAR >= ? and WYEAR <= ? \n AND eyear >= ? AND eyear <= ? \n GROUP BY wyear, eyear\n ORDER BY wyear, eyear\"\"\", (WFROM, WTO, efrom, eto))\n\n years = c.fetchall()\n epigraphtotal = sum (s for (x,y,s,n) in years)\n #plt.xlim(WFROM, WTO)\n #plt.ylim(100, -1500)\n #colors = list(mcolors.TABLEAU_COLORS.keys()) *20\n #print(colors)\n \n \n gen =dd(lambda: dd(int))\n gentotal= dd(int)\n for (x,y,s,n) in years:\n gen[generation(x,g)][generation(y-x,g)] += 1\n gentotal[generation(x,g)] +=1\n \n for x in gen:\n for y in gen[x]:\n print(x, y, gen[x][y], gentotal[x])\n\n \n\n plt.figure(figsize=(10, 5))\n ax=plt.axes()\n\n\n #df.plot(colormap=gray) \n cumtotal = [0]*len(gen)\n\n for d in range(0,-200, -1):\n #for d in range(min(gen.keys()),max(gen.keys()),-1):\n xv = list(gen.keys())\n yv = [rat(gen[x][d],gentotal[x]) for x in xv]\n plt.bar(xv, yv, bottom=cumtotal,\n tick_label=[x*g for x in xv])\n cumtotal = [x + y for x, y in zip(yv, cumtotal)]\n #colors.pop()\n #print(d, cumtotal)\n plt.xlabel('Year of Work (in generations)')\n plt.ylabel(f'Share of Distance to forebear (in {g} year generations)')\n plt.title(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')\n plt.savefig(f\"figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png\")\n plt.close()", "def footprint_demo(**kw):\n # Note: needs fixed slits in single_point()\n count = 1500000\n data = []\n for theta in np.linspace(0.15, 5, 30):\n n = single_point(theta=theta, count=count, trace=False, **kw)\n data.append((theta, np.sum(n.active)))\n print(data[-1])\n x, y = zip(*data)\n pylab.plot(x, np.array(y)/count)\n pylab.show()" ]
[ "0.64481795", "0.6369142", "0.61868083", "0.615285", "0.60729265", "0.60604787", "0.59911245", "0.5980409", "0.59740573", "0.5895259", "0.5869834", "0.5847987", "0.5836506", "0.5818547", "0.58129287", "0.5809058", "0.57461077", "0.57257223", "0.5720079", "0.5665956", "0.5658745", "0.5657095", "0.56568444", "0.56556606", "0.5653524", "0.56482875", "0.56281406", "0.5612309", "0.56049144", "0.5597462", "0.55969524", "0.5549854", "0.5542334", "0.5501766", "0.54785734", "0.54710495", "0.5467085", "0.54391235", "0.54356205", "0.54184675", "0.5409614", "0.53935075", "0.53924096", "0.53863746", "0.53662336", "0.5364411", "0.5362722", "0.5356316", "0.5352667", "0.53372616", "0.5322067", "0.53157556", "0.5305238", "0.5296226", "0.52960217", "0.5292358", "0.5283394", "0.5279969", "0.52775943", "0.5265774", "0.5256308", "0.52559984", "0.52455014", "0.5242633", "0.52322954", "0.52274036", "0.52269346", "0.5225511", "0.5223172", "0.5221922", "0.52161866", "0.5208619", "0.5206827", "0.5205959", "0.52025235", "0.519898", "0.5198257", "0.5197215", "0.519367", "0.5184684", "0.5184629", "0.5180084", "0.51697767", "0.5164442", "0.5158581", "0.5158167", "0.51461357", "0.514393", "0.514393", "0.51413596", "0.5140162", "0.5139185", "0.51371026", "0.5136035", "0.51332366", "0.5131128", "0.5119556", "0.51132375", "0.5112503", "0.5110255" ]
0.67767966
0
Load all of the ovarian dataframes as values in the self._data dict variable, with names as keys, and format them properly.
def __init__(self, version="latest", no_internet=False): # Set some needed variables, and pass them to the parent Dataset class __init__ function valid_versions = ["0.0", "0.0.1"] # This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle. data_files = { "0.0": [ "clinical.csv.gz", "cnv.tsv.gz", "definitions.txt", "phosphoproteomics.txt.gz", "proteomics.txt.gz", "somatic_38.maf.gz", "transcriptomics.tsv.gz", "treatment.csv.gz"], "0.0.1": [ "clinical.csv.gz", "cnv.tsv.gz", "definitions.txt", "Ovary_One_Year_Clinical_Data_20160927.xls", "phosphoproteomics.txt.gz", "proteomics.txt.gz", "somatic_38.maf.gz", "transcriptomics.tsv.gz", "treatment.csv.gz"], } super().__init__(cancer_type="ovarian", version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet) # Load the data files into dataframes in the self._data dict loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}" for file_path in self._data_files_paths: # Print a loading message. We add a dot every time, so the user knows it's not frozen. loading_msg = loading_msg + "." print(loading_msg, end='\r') path_elements = file_path.split(os.sep) # Get a list of all the levels of the path file_name = path_elements[-1] # The last element will be the name of the file df_name = file_name.split(".")[0] # Our dataframe name will be the first section of file name (i.e. proteomics.txt.gz becomes proteomics) if file_name == "clinical.csv.gz" or file_name == "treatment.csv.gz": df = pd.read_csv(file_path, sep=",", index_col=0) df = df.rename(columns={"Participant_ID":"Patient_ID"}) df = df.set_index("Patient_ID") self._data[df_name] = df #maps dataframe name to dataframe elif file_name == "cnv.tsv.gz": df = pd.read_csv(file_path, sep="\t", index_col=0) df = df.sort_index() df = df.transpose() df = df.sort_index() self._data["CNV"] = df #maps dataframe name to dataframe elif file_name == "definitions.txt": with open(file_path, "r", errors="ignore") as definitions_file: for line in definitions_file.readlines(): line = line.strip() line = line.split("\t") term = line[0] definition = line[1] self._definitions[term] = definition elif file_name == "phosphoproteomics.txt.gz" or file_name == "proteomics.txt.gz": df = pd.read_csv(file_path, sep='\t') if file_name == "proteomics.txt.gz": df = df[df["hgnc_symbol"].notnull()] # Drops all nan values in hgnc_symbol column # Create our column multiindex df = df.rename(columns={"hgnc_symbol": "Name", "refseq_peptide": "Database_ID"}) df = df.set_index(["Name", "Database_ID"]) elif file_name == "phosphoproteomics.txt.gz": df = df[df["site"].notnull()] # Drops all rows with nan values in site column # Create our column multiindex split_genes = df["site"].str.rsplit("-", n=1, expand=True) # Split the genes from the sites, splitting from the right since some genes have hyphens in their names, but the genes and sites are also separated by hyphens df = df.drop(columns=["hgnc_symbol", "site"]) # hgnc_symbol is a duplicate of split_genes[0], and site is now in split_genes and will be re-inserted differently df = df.assign(Name=split_genes[0], Site=split_genes[1]) df["Site"] = df["Site"].str.replace(r"[sty]", r"", regex=True) # Get rid of all lowercase s, t, and y delimeters in the sites df = df.rename(columns={"refseq_peptide": "Database_ID"}) df = df.set_index(["Name", "Site", "Peptide", "Database_ID"]) # Turn these 
columns into a multiindex df = df.sort_index() df = df.transpose() df.index = df.index.where(~df.index.str.startswith('C'), df.index.str[1:]) # Take C prefix off of indices for those samples that have them (tumor samples have C, normal have N) df = df.drop(index=df.index[df.index.str.startswith("OV_QC")]) # Drop all OV_QC samples--they're quality control samples not relevant for data analysis self._data[df_name] = df elif file_name == "somatic_38.maf.gz": df = pd.read_csv(file_path, sep = "\t", index_col=0) df = df.reset_index() split_barcode = df["Tumor_Sample_Barcode"].str.split("_", n = 1, expand = True) # The first part of the barcode is the patient id, which we need to make a Patient_ID column df["Tumor_Sample_Barcode"] = split_barcode[0] df = df[["Tumor_Sample_Barcode","Hugo_Symbol","Variant_Classification","HGVSp_Short"]] # We only want these columns df = df.rename(columns={"Tumor_Sample_Barcode":"Patient_ID","Hugo_Symbol":"Gene","Variant_Classification":"Mutation","HGVSp_Short":"Location"}) df = df.sort_values(by=["Patient_ID", "Gene"]) df = df.set_index("Patient_ID") self._data['somatic_mutation'] = df elif file_name == "transcriptomics.tsv.gz": df = pd.read_csv(file_path, sep="\t", index_col=0) df = df.sort_index() df = df.transpose() df = df.sort_index() date_cols = ['1-Dec', '1-Sep', '10-Mar', '10-Sep', '11-Sep', '12-Sep', '14-Sep', '15-Sep', '2-Mar', '2-Sep', '3-Mar', '3-Sep', '4-Mar', '4-Sep', '5-Mar', '6-Mar', '6-Sep', '7-Mar', '7-Sep', '8-Mar', '8-Sep', '9-Mar', '9-Sep'] df = df.drop(columns=date_cols) # Drop all date values until new data is uploaded self._data[df_name] = df #maps dataframe name to dataframe elif file_name == 'Ovary_One_Year_Clinical_Data_20160927.xls' and self._version == "0.0.1": df = pd.read_excel(file_path) # Replace redundant values for "not reported" with NaN nan_equivalents = ['Not Reported/ Unknown', 'Reported/ Unknown', 'Not Applicable', 'na', 'unknown', 'Not Performed', 'Unknown tumor status', 'Unknown', 'Unknown Tumor Status', 'Not specified'] df = df.replace(nan_equivalents, np.nan) # Rename PPID to Patient_ID and set as index df = df.rename(columns={'PPID': 'Patient_ID'}) df = df.set_index("Patient_ID") df = df.sort_index() self._data["followup"] = df print(' ' * len(loading_msg), end='\r') # Erase the loading message formatting_msg = "Formatting dataframes..." print(formatting_msg, end='\r') # Get a union of all dataframes' indices, with duplicates removed master_index = unionize_indices(self._data, exclude="followup") # Use the master index to reindex the clinical dataframe, so the clinical dataframe has a record of every sample in the dataset. Rows that didn't exist before (such as the rows for normal samples) are filled with NaN master_clinical = self._data['clinical'].reindex(master_index) # Add a column called Sample_Tumor_Normal to the clinical dataframe indicating whether each sample was a tumor or normal sample. Normal samples have a Patient_ID that begins with 'N'. clinical_status_col = generate_sample_status_col(master_clinical, normal_test=lambda sample: sample[0] == 'N') master_clinical.insert(0, "Sample_Tumor_Normal", clinical_status_col) # Replace the clinical dataframe in the data dictionary with our new and improved version! self._data['clinical'] = master_clinical # Edit the format of the Patient_IDs to have normal samples marked the same way as in other datasets. Currently, all the normal samples have an "N" prepended. We're going to erase that and put a ".N" at the end. 
self._data = reformat_normal_patient_ids(self._data, existing_identifier="N", existing_identifier_location="start") # Call function from dataframe_tools.py to sort all tables first by sample status, and then by the index self._data = sort_all_rows(self._data) # Call function from dataframe_tools.py to standardize the names of the index and column axes self._data = standardize_axes_names(self._data) print(" " * len(formatting_msg), end='\r') # Erase the formatting message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! 
')", "def load(self):\n self.data = pd.read_pickle(self.DATE_PKL)\n self.data.index.name = DATE_COL\n\n for hname, h in self.handlers.items():\n print(\"Loading %s\" % hname)\n cur_out = '../'+h.out_path\n df = pd.read_pickle(cur_out).resample('D').ffill() # make daily and forward fill the values\n if hname in self.data.columns:\n # getting to a distinct column:\n i = 2\n while \"%s_%s\" % (hname, i) in self.data.columns:\n i += 1\n print(\"warning: %s was already in the data set, instead we merged new column as %s\" %\n (hname, hname + '_%s' % i))\n self.data = self.data.join(df, how='left', rsuffix=\"_%s\" % i)\n else:\n self.data = self.data.join(df, how='left')", "def make_dataframes(self):\n self._data_frame_30days = pd.DataFrame(self._all30_dict, index=SIRPPipeline.data_frame_INDEX).transpose()\n self._data_frame_60days = pd.DataFrame(self._all60_dict, index=SIRPPipeline.data_frame_INDEX).transpose()\n self._data_frame_90days = pd.DataFrame(self._all90_dict, index=SIRPPipeline.data_frame_INDEX).transpose()\n self._data_frame_counts = pd.DataFrame(\n {\n \"Created\": {\"totals\": self._data_frame_30days.count()[\"Created\"]},\n \"Closed\": {\"totals\": self._data_frame_30days.count()[\"Closed\"]},\n \"Owner\": (self._data_frame_30days[\"Owner\"].value_counts().to_dict()),\n \"Resolution\": (self._data_frame_30days[\"Resolution\"].value_counts().to_dict()),\n \"Severity\": (self._data_frame_30days[\"Severity\"].value_counts().to_dict()),\n },\n index=self.counts_frame_INDEX,\n )\n self._data_frame_counts.fillna(0, inplace=True)", "def load_all_data_from_file(self) -> None:\n self.load_gene_data_from_file()\n self.load_ontology_from_file(ontology_type=DataType.GO, ontology_url=self.go_ontology_url,\n ontology_cache_path=self.go_ontology_cache_path,\n config=self.config)\n self.load_associations_from_file(associations_type=DataType.GO, associations_url=self.go_associations_url,\n associations_cache_path=self.go_associations_cache_path, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.DO, ontology_url=self.do_ontology_url,\n ontology_cache_path=self.do_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.DO, associations_url=self.do_associations_url,\n associations_cache_path=self.do_associations_cache_path,\n association_additional_cache_path=self.do_associations_new_cache_path,\n association_additional_url=self.do_associations_new_url, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.EXPR, ontology_url=self.expression_ontology_url,\n ontology_cache_path=self.expression_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.EXPR,\n associations_url=self.expression_associations_url,\n associations_cache_path=self.expression_associations_cache_path,\n config=self.config)\n self.load_orthology_from_file()\n self.load_expression_cluster_data()\n self.load_protein_domain_information()", "def load_data():\n data_path = os.path.join('qual-o-mat-data', 'data', '2019', 'europa')\n data_keys = [\"answer\", \"comment\", \"opinion\", \"party\", \"statement\"]\n raw_data = dict()\n all_data = dict()\n\n # Create a dictionary of type <string, DataFrame> that contains the data from all JSON files\n for dk in data_keys:\n json_file = os.path.join(data_path, dk + \".json\")\n with open(json_file, \"r\") as fh:\n raw_data[dk] = json.load(fh)\n all_data[dk] = pd.DataFrame(raw_data[dk])\n\n\n # Based on the opinion data, merge all other data frames on their ID 
fields to get usable names instead of just ID numbers\n merged_df = all_data[\"opinion\"].copy()\n for to_merge in [\"party\", \"statement\", \"comment\", \"answer\"]:\n merged_df = merged_df.merge(all_data[to_merge], how='inner', left_on=[to_merge], right_on=['id'])\n\n #print(mdf.head())\n return merged_df, all_data, raw_data", "def _load_results(self):\n\n _LOG.debug(\"stats colnames: %s\", \", \".join(self._stats_colnames))\n _LOG.debug(\"additional colnames: %s\", \", \".join(self._more_colnames))\n\n for res in self.rsts:\n _LOG.debug(\"hover colnames: %s\", \", \".join(self._hov_colnames[res.reportid]))\n\n colnames = []\n for colname in self._hov_colnames[res.reportid] + self._more_colnames:\n if colname in res.colnames_set:\n colnames.append(colname)\n\n csel = Trivial.list_dedup(self._stats_colnames + colnames)\n res.clear_filts()\n res.set_csel(csel)\n res.load_df()\n\n # We'll be dropping columns and adding temporary columns, so we'll affect the original\n # dataframe. This is more effecient than creating copies.\n self._mangle_loaded_res(res)", "def load_dataframe(self) -> None:\n with open(self.__data_path.split('.')[0] + '_dtypes.json', 'r') as f:\n dtypes = json.load(f)\n self.__DataFrame = pd.read_csv(self.__data_path, dtype=dtypes)\n self.map_items()", "def _reload(self):\n if os.path.exists(self.filename):\n self.data = pd.read_csv(self.filename)\n else:\n self.data = pd.DataFrame(columns=self.unique_keys)\n\n # Set these default values\n # if 'weight_rescale' not in self.data.columns:\n # self.data['weight_rescale'] = 'none'\n # if 'norm' not in self.data.columns:\n # self.data['norm'] = 'softmax'\n # if 'update' not in self.data.columns:\n # self.data['update'] = 'all'\n # if 'replay' not in self.data.columns:\n # self.data['replay'] = False\n if 'debug' not in self.data.columns:\n self.data['debug'] = False\n\n # if 'tie' not in self.data.columns:\n # self.data['tie'] = False\n\n if 'update_length' not in self.data.columns:\n self.data['update_length'] = 0\n # for key in self.unique_keys:\n # self.data[key] = np.nan\n # Remaining set to None\n # for k in self.check_keys:\n # if k not in self.data.columns:\n # self.data[k] = None", "def __init__(self, out_dir = 'output' ):\n \n self.data = {} # will contain the data for each different dataset \n self.datasets = '' # will contain the input datasets (original dictionary)\n self.datasets_keys = '' # will contain the input datasets names only (i.e. 
keys of the datasets dictionary)\n self.datasets_all = ['igra2' , 'era5_1' , 'ncar_w' , 'ncar_t', 'bufr' , 'era5_1759' , 'era5_1761' , 'era5_3188'] # all possibly available datasets \n #self.observation_ids_merged = { 'igra2':1 , 'ncar_t':2 , 'ncar_w':2, 'bufr':3, 'era5_1':4 , 'era5_1759' :5 , 'era5_1761':6 , 'era5_3188' :7} # values used to convert original record_id to the merged record_id, see method merge_all_data \n \n self.observation_ids_merged = { 'igra2':1 , 'ncar':2, 'bufr':3, 'era5_1':4 , 'era5_1759' :5 , 'era5_1761':6 , 'era5_3188' :7} # values used to convert original record_id to the merged record_id, see method merge_all_data \n \n self.unique_dates = {} \n self.attributes = {} # will keep the original attributes from the CDM tables, read from the netCDF files \n self.out_dir = out_dir", "def _gather_data(self):\n for data in self._collection:\n label = data.label\n label = disambiguate(label, self._data)\n self._data[label] = data", "def _load_data(self):\n\n path_data_x = \\\n '/workspace/base-ml/data/tadpole/adni_one_baseline_feature_data' \\\n '.csv'\n path_data_y = \\\n '/workspace/base-ml/data/tadpole/adni_one_baseline_label_data' \\\n '.csv'\n path_meta = '/workspace/base-ml/data/tadpole' \\\n '/adni_one_baseline_meta_data' \\\n '.csv'\n read_data_x = pd.read_csv(path_data_x)\n read_data_y = pd.read_csv(path_data_y) # 0 NL, 1, MCI, 2 Dementia\n read_data_meta = pd.read_csv(path_meta)[['AGE', 'PTGENDER', 'APOE4']]\n\n # Replace gender to numeric\n read_data_meta.PTGENDER = read_data_meta.PTGENDER.replace('Male', 0)\n read_data_meta.PTGENDER = read_data_meta.PTGENDER.replace('Female', 1)\n\n new_data_x = np.array(read_data_x).astype(np.float32)\n new_data_y = np.array(read_data_y).astype(np.float32)\n new_data_meta = np.array(read_data_meta).astype(np.float32)\n\n # Concat meta-information with feature vector input\n concat_meta = pd.DataFrame(new_data_meta)\n concat_meta.iloc[:, 2] = concat_meta.iloc[:, 2].replace(0, 'zero')\n concat_meta.iloc[:, 2] = concat_meta.iloc[:, 2].replace(1, 'one')\n concat_meta.iloc[:, 2] = concat_meta.iloc[:, 2].replace(2, 'two')\n concat_meta = concat_meta.to_numpy()\n new_data_x = np.concatenate([concat_meta, new_data_x], 1)\n print(new_data_x.shape, new_data_y.shape, new_data_meta.shape)\n\n self.orig_column_names = ['Age', 'Gender', 'APOE4'] + \\\n list(read_data_x.columns)\n self.data_x = new_data_x\n self.data_y = self.to_one_hot_encoding(new_data_y)\n self.numerical_idx = np.arange(new_data_x.shape[-1])\n self.numerical_idx = np.delete(self.numerical_idx, [2]) # Remove APOE column idx\n self.non_num_idx = np.array([2])\n self.all_non_numerical_idx = None\n\n # self.numerical_idx = np.arange(self.data_x.shape[-1])\n # self.non_num_idx = None\n # self.all_non_numerical_idx = None\n\n # Calculate adjacency matrix\n self.meta_inf = new_data_meta.astype('float32')\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def load_results(fnames=None, base_fname='figure_data_'):\n\n if fnames==None:\n fnames = glob.glob(base_fname + '*.npz')\n\n num_subfunctions = None\n full_objective_period = None\n\n history_nested = {}\n for fn in fnames:\n data = np.load(fn)\n if num_subfunctions is None:\n num_subfunctions = data['num_subfunctions']\n full_objective_period = data['full_objective_period']\n if not (num_subfunctions == data['num_subfunctions'] and full_objective_period == data['full_objective_period']):\n print \"****************\"\n print \"WARNING: mixing data with different numbers of subfunctions or delays between evaluating 
the full objective\"\n print \"make sure you are doing this intentionally (eg, for the convergence vs., number subfunctions plot)\"\n print \"****************\"\n model_name = data['model_name'].tostring()\n print(\"loading\", model_name)\n if not model_name in history_nested:\n history_nested[model_name] = data['history'][()].copy()\n else:\n print(\"updating\")\n for subkey in history_nested[model_name].keys():\n print subkey\n history_nested[model_name][subkey].update(data['history'][()].copy()[subkey])\n data.close()\n\n return history_nested, num_subfunctions, full_objective_period", "def load_and_fix(self):\n # Read in json\n self.read_json()\n\n if self.size_to_load:\n self.data = self.data[:self.size_to_load]\n\n # Add names from database given _bsn:\n self.extend_dataframe_with_personnames()\n\n # Clean rows in the data_frame where the names column is empty - > thus no response from the database\n self.clean_none_response()\n\n # Fix path from A09.pdf to A09.json\n self.fix_path()\n\n # Get the correct names from the database response\n self.parse_names_from_response()\n\n print(\" --- Final Shape Data ---\")\n print(self.data.shape)\n print(list(self.data))\n\n # Save pickled object in ./data map\n self.save_obj(self.data, self.file_name_to_save)", "def _load_data(self):\n\n path_data_x = '/workspace/base-ml/data/dizzyreg/t%s_df.csv' % \\\n self.task_num\n path_data_y = '/workspace/base-ml/data/dizzyreg/label_df_t%s.csv' % self.task_num\n path_meta = '/workspace/base-ml/data/dizzyreg/meta_df_t%s.csv' % self.task_num\n path_numerical_columns = '/workspace/base-ml/data/dizzyreg/num_columns_v2.csv'\n path_nonnumerical_columns = '/workspace/base-ml/data/dizzyreg/non_num_columns_v2.csv'\n\n read_data_x = pd.read_csv(path_data_x)\n read_data_y = pd.read_csv(path_data_y)\n read_data_meta = pd.read_csv(path_meta)\n\n # Drop columns if it only contains 1 unique element\n read_data_x = pd.DataFrame(self.drop_one_elem_columns(read_data_x))\n\n num_col = pd.read_csv(path_numerical_columns)\n num_col = read_data_x.columns.isin(num_col['0'].values).nonzero()[0]\n col_idx = np.arange(read_data_x.shape[-1])\n non_num_col = np.setdiff1d(col_idx, num_col)\n\n # new_data_x = np.array(read_data_x).astype(np.float32)\n new_data_x = np.array(read_data_x)\n new_data_y = np.array(read_data_y).astype(np.float32)\n new_data_meta = np.array(read_data_meta).astype(np.float32)\n\n print(new_data_x.shape, new_data_y.shape, new_data_meta.shape)\n\n\n # Winsorize dataset\n len_feat = new_data_x.shape[-1]\n idx_list = list(num_col)\n for i in range(len_feat):\n if i in idx_list:\n cur_data = new_data_x[:, i]\n cur_data = np.array(cur_data)\n lower_p = np.percentile(cur_data, 5)\n higher_p = np.percentile(cur_data, 95)\n cur_data[cur_data < lower_p] = lower_p\n cur_data[cur_data > higher_p] = higher_p\n new_data_x[:, i] = cur_data\n\n # Make sure target data is one-hot encoded\n if new_data_y.shape[-1] == 1:\n num_class = len(np.unique(new_data_y))\n new_data_y = np.eye(num_class)[new_data_y.astype(int).reshape(-1)]\n new_data_y = new_data_y.astype('float32')\n self.orig_column_names = read_data_x.columns\n self.data_x = new_data_x # N x F\n self.data_y = new_data_y # N x C\n self.numerical_idx = num_col # list of idx\n self.non_num_idx = non_num_col # None\n\n # Calculate adjacency matrix\n self.meta_inf = new_data_meta.astype('float32') # N x 3\n if self.args.graph_type:\n self.adj = self.get_adjacency()", "def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in 
datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)", "def initialize_data(self , station = '', datasets = {} ): \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n self.out_name = self.out_dir + '/' + self.station + '_CEUAS_premerged_v0.nc'\n\n self.observations_table_vars = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units', 'source_id']\n\n \"\"\" Loading the econding of the tables created from the harvester script and to be applied again \"\"\"\n self.encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n self.encodings['era5fb'] = np.load('era5fb_encodings_all.npy' , allow_pickle = True ).item() \n self.dic_type_attributes = np.load('dic_type_attributes.npy',allow_pickle= True).item()\n \n self.era5fb_columns = self.dic_type_attributes['era5fb'].keys()\n\n self.obstab_nans_filled = False \n\n data['cdm_tables'] = {} \n \n \"\"\" Loop over all the datasets \n k: name of the dataset \n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ] \"\"\" \n for k,v in self.datasets.items() :\n data[k] = {}\n for F in v:\n \n logging.info(' Dataset ::: *** %s %s ' , k , F ) \n \n data[k][F] = {}\n\n h5py_file = h5py.File(F, 'r')\n data[k][F]['h5py_file'] = h5py_file \n \n a = h5py_file['recordtimestamp']\n \n data[k][F]['recordtimestamp'] = a\n data[k][F]['recordindex'] = h5py_file['recordindex']\n data[k][F]['dateindex'] = h5py_file['dateindex']\n a = h5py_file['recordtimestamp']\n data[k][F]['max_date'] = max(a)\n data[k][F]['min_date'] = min(a)\n \n data[k][F]['counter'] = 0\n\n #######\n # HEADER TABLE\n #######\n head_tab = h5py_file['header_table']\n logging.info('*** header_table')\n data[k][F]['header_table'] = {}\n for var in head_tab.keys():\n if ('string' in var or 'hdrlen' in var): continue\n try: \n 
data[k][F]['header_table'][var] = (np.array(head_tab[var][:])).astype(self.dic_type_attributes['header_table'][var]['type'] )\n except:\n print('failed convertion type header' , k , ' ' , F , ' ' , var )\n \n ####### \n # STATION CONFIGURATION\n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'station_configuration' , decode_times = False )\n data[k][F]['station_configuration'] = d.to_dataframe()\n logging.debug('Done with %s station_configuration' , str(k) )\n d.close()\n\n ####### \n # SOURCE CONFIGURATION \n ####### \n d = xr.open_dataset(F , engine = 'h5netcdf' , group = 'source_configuration' , decode_times = False )\n data[k][F]['source_configuration'] = d\n logging.debug('Done with %s source_configuration' , str(k) )\n d.close()\n\n\n data['cdm_tables'] = {}\n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\"\n for t in self.standard_cdm: # [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n if t not in data['cdm_tables'].keys():\n #data['cdm_tables'][t] = ''\n cdm = xr.open_dataset(F , engine = 'h5netcdf' , group = t )\n data['cdm_tables'][t] = cdm \n\n print(blue + 'Memory used after reading data: ', process.memory_info().rss/1000000000 , cend)\n\n self.data = data\n\n \"\"\" Making all date_times \"\"\"\n self.make_all_datetime()", "def load_data(self,do_print=True,keys_to_load=[]):\n \n if do_print:\n print\n print 'Loading corr_space data, Id = %s'%self.id\n\n\n data= np.load(self.dataPath,mmap_mode='r')\n \n loaded_keys=[]\n \n if len(keys_to_load)==0:\n for k,v in data.items():\n setattr(self,k,v)\n loaded_keys.append(k)\n else: \n for k in keys_to_load:\n setattr(self,k,data[k])\n loaded_keys.append(k)\n\n \n if do_print:\n print 'Loaded variables: '+' '.join(loaded_keys)", "def load_data(self):\n\n X = []; Y = []; P = []; positions={}; goalkeeper = []; defender = []; midfielder = []; forward = []\n label_dict = {}\n with open('columns', 'r') as columns_file:\n columns = columns_file.read().splitlines()\n with open('form_columns', 'r') as form_columns_file:\n form_columns = form_columns_file.read().splitlines()\n\n for csv_file in self.get_csv_files():\n df = pandas.read_csv(csv_file, usecols=columns)\n df = df.fillna(value=0.0)\n replace_outcome = {1.0:1.0, 0.0:-1.0}\n df[\"outcome\"].replace(replace_outcome, inplace=True)\n\n for game_id in df.game_id.unique():\n df_game = df.loc[df['game_id'] == game_id]\n\n df_position_team = df_game.loc[df_game['event_type'] == 'Team set up']\n for index, row in df_position_team.iterrows():\n positions_id = row['player_position'].replace(' ','').split(',')\n involved = row['involved'].replace(' ','').split(',')\n for index, position in enumerate(positions_id):\n if position != 5:\n positions[float(involved[index])] = position\n df_substitute = df_game.loc[df_game['event_type'] == 'Player on']\n for index, row in df_substitute.iterrows():\n if row['player_position'] == 'Defender':\n positions[float(row['player_id'])] = 2\n elif row['player_position'] == 'Midfielder':\n positions[float(row['player_id'])] = 3\n elif row['player_position'] == 'Forward':\n positions[float(row['player_id'])] = 4\n\n\n for player_id in df_game.player_id.unique():\n if player_id == 0.0:\n continue\n df_player = df_game.loc[df_game['player_id'] == player_id]\n features = []\n for feature in form_columns:\n total = (df_player[feature]).sum()\n if total:\n success = 
(df_player[feature]*df_player['outcome']).sum()\n features.append(success/total)\n else:\n features.append(0.0)\n if str(game_id) in player_rating:\n if str(player_id) in player_rating[str(game_id)]:\n arr = [str(game_id), str(player_id)] + features\n arr.append(player_rating[str(game_id)][str(player_id)])\n if positions[player_id] == 1 or True:\n goalkeeper.append(arr)\n elif positions[player_id] == 2:\n defender.append(arr)\n elif positions[player_id] == 3:\n midfielder.append(arr)\n elif positions[player_id] == 4:\n forward.append(arr)\n\n X.append(features)\n P.append(positions[player_id])\n Y.append(player_rating[str(game_id)][str(player_id)])\n else:\n print (\"Player missing : \", game_id, player_id)\n else:\n print (\"Game missing : \", game_id)\n return X, P, Y, goalkeeper, defender, midfielder, forward", "def loadData():\n #dictionary for datasets\n data = {'twcr': [\"twcr19802010Validation.csv\", \"20CR\"],\n 'era20c': [\"era20c19802010Validation.csv\", \"ERA20C\"],\n 'eraint':[\"eraint19802010Validation.csv\", \"ERA-Interim\"],\n 'merra': [\"merra19802010Validation.csv\", \"MERAA\"],\n 'erafive': [\"erafive19802010Validation.csv\", \"ERA-FIVE\"]\n }\n os.chdir(\"G:\\\\data\\\\allReconstructions\\\\validation\\\\commonPeriodValidation\")\n\n twcrDat = pd.read_csv(data['twcr'][0])\n twcrDat.columns = ['deleteIt','tg', 'lon', 'lat', 'reanalysis', 'corrTwcr', 'rmseTwcr', 'nseTwcr']\n era20cDat = pd.read_csv(data['era20c'][0])\n era20cDat.columns = ['deleteIt','tg', 'long', 'latt', 'reanalysis', 'corrEra20c', 'rmseEra20c', 'nseEra20c']\n eraintDat = pd.read_csv(data['eraint'][0])\n eraintDat.columns = ['deleteIt','tg', 'long', 'latt', 'reanalysis', 'corrEraint', 'rmseEraint', 'nseEraint']\n merraDat = pd.read_csv(data['merra'][0])\n merraDat.columns = ['deleteIt','tg', 'long', 'latt', 'reanalysis', 'corrMerra', 'rmseMerra', 'nseMerra']\n erafiveDat = pd.read_csv(data['erafive'][0])\n erafiveDat.columns = ['deleteIt','tg', 'long', 'latt', 'reanalysis', 'corrErafive', 'rmseErafive', 'nseErafive']\n\n\n return twcrDat, era20cDat, eraintDat, merraDat, erafiveDat", "def _load_data(self):\n\n if not self._cache.exists(config.DATAFRAME_SONG_DATA):\n source_path = os.path.join(config.S3_SONG_DATA, 'A/A/A/*.json') # Note: song database is way big, so we get only a slice of it.\n dataframe = self._get_spark_session().read.json(source_path)\n self._cache.set_source(config.DATAFRAME_SONG_DATA, dataframe)", "def combine_data(self):\n for country in config.COUNTRIES:\n frames = []\n for year in config.years:\n incidence_path = (config.raw_data_path / country / 'complete'\n / (str(year) + '_' + str(year + 1) + '.csv'))\n\n if incidence_path.exists() and incidence_path.is_file():\n df_incidence = pd.read_csv(incidence_path)\n\n wiki_path1 = config.raw_data_path / ('wikipedia_' +\n country) / \\\n 'complete' / (\n str(year) + '.csv')\n wiki_path2 = config.raw_data_path / ('wikipedia_' +\n country) / \\\n 'complete' / (\n str(year + 1) + '.csv')\n\n if wiki_path1.exists() and wiki_path1.is_file():\n df_wiki1 = pd.read_csv(wiki_path1)\n df_wiki1 = df_wiki1.rename(columns={'Week': 'week'})\n df_incidence = pd.merge(\n df_wiki1, df_incidence, on='week', how='right')\n\n if wiki_path2.exists() and wiki_path2.is_file():\n df_wiki2 = pd.read_csv(wiki_path2)\n df_wiki2 = df_wiki2.rename(columns={'Week': 'week'})\n df_incidence = pd.merge(\n df_wiki2, df_incidence, on='week', how='right')\n\n for col_name in df_incidence.columns:\n if col_name[-1] == 'x':\n if col_name[:-2] + '_y' in df_incidence.columns:\n 
df_incidence[col_name[:-2]] = df_incidence[\n col_name].fillna(\n df_incidence[col_name[:-2] + '_y'])\n df_incidence = df_incidence.drop(\n columns=[col_name,\n col_name[:-2] + '_y'])\n\n frames.append(df_incidence)\n\n df_country = pd.concat(frames)\n df_country['date'] = pd.to_datetime(\n df_country.week.add('-0'), format='%Y-%W-%w')\n df_country = df_country.sort_values(by=\"date\")\n\n if 'cases' in df_country.columns:\n df_country.drop(columns=['cases'])\n\n file_path = config.combined_data_path / (country + '.csv')\n\n df_country.to_csv(file_path, index=False)", "def make_dataframes(folders, file_stem):\n\n print \"Making one big dataframe...\"\n df_orig = load_df(folders, file_stem, n_files=500)\n # df_orig = load_df(folders, \"output\")\n # df_orig = load_df(folders, \"output_ma1Lt11\")\n # df_orig = load_df(folders, \"output_good\")\n\n print len(df_orig.index), 'entries in dataframe'\n\n # Drop columns to save space\n drop_cols = [\n 'h1u', 'h1d', 'h1b', 'h1V', 'h1G', 'h1A',\n 'h2u', 'h2d', 'h2b', 'h2V', 'h2G', 'h2A',\n 'Brh3gg', 'Brh3tautau', 'Brh3bb', 'Brh3ww',\n 'Brh3zz', 'Brh3gammagamma', 'Brh3zgamma',\n 'Brh3h1h1', 'Brh3h2h2', 'Brh3h1h2',\n 'Brh3a1a1', 'Brh3a1z',\n # 'bsgamma', 'bsmumu', 'btaunu', 'delms', 'delmd']\n ]\n\n for col in drop_cols:\n if col in df_orig.columns.values:\n df_orig.drop(col, inplace=True, axis=1)\n print \"After dropping columns:\", df_orig.columns.values, len(df_orig.columns.values), \"columns\"\n\n # Remove any duplicate entries\n df_orig.drop_duplicates(inplace=True)\n\n # Load up the glu-glu cross sections for 13 TeV\n print \"Adding in cross-sections...\"\n # cs = pd.read_csv(\"parton_lumi_ratio.csv\")\n cs = pd.read_csv(\"YR3_cross_sections.csv\")\n masses = cs[\"MH [GeV]\"]\n mass_len = len(masses)\n xsec_ggf13 = cs[\"ggF 13TeV Cross Section [pb]\"]\n xsec_vbf13 = cs[\"VBF 13TeV Cross Section [pb]\"]\n # xsec_wh13 = cs[\"WH 13TeV Cross Section [pb]\"]\n # xsec_zh13 = cs[\"ZH 13TeV Cross Section [pb]\"]\n xsec_ggf8 = cs[\"ggF 8TeV Cross Section [pb]\"]\n xsec_vbf8 = cs[\"VBF 8TeV Cross Section [pb]\"]\n\n def find_closest_mass_ind(mass):\n pos = bisect_left(masses, mass)\n if pos == mass_len:\n return mass_len - 1\n return pos\n\n print 'Storing nearest-mass indices'\n df_orig['mass_ind_h1'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh1']), axis=1)\n df_orig['mass_ind_h2'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh2']), axis=1)\n df_orig['mass_ind_h3'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh3']), axis=1)\n\n # ALL XSEC STORED ARE CORRECTLY SCALED BY REDUCED COUPLING\n print \"Storing 13 TeV gg xsec\"\n df_orig[\"xsec_ggf13_h1\"] = df_orig['h1ggrc2'] * xsec_ggf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf13_h2\"] = df_orig['h2ggrc2'] * xsec_ggf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf13_h3\"] = df_orig['h3ggrc2'] * xsec_ggf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 13 TeV vbf xsec\"\n df_orig[\"xsec_vbf13_h1\"] = df_orig['h1vvrc2'] * xsec_vbf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf13_h2\"] = df_orig['h2vvrc2'] * xsec_vbf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf13_h3\"] = df_orig['h3vvrc2'] * xsec_vbf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV ggf xsec\"\n df_orig[\"xsec_ggf8_h1\"] = df_orig['h1ggrc2'] * xsec_ggf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf8_h2\"] = df_orig['h2ggrc2'] * xsec_ggf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf8_h3\"] = df_orig['h3ggrc2'] * 
xsec_ggf8[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV vbf xsec\"\n df_orig[\"xsec_vbf8_h1\"] = df_orig['h1vvrc2'] * xsec_vbf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf8_h2\"] = df_orig['h2vvrc2'] * xsec_vbf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf8_h3\"] = df_orig['h3vvrc2'] * xsec_vbf8[df_orig['mass_ind_h3']].values\n\n # Now add in individual channel xsec\n store_channel_xsec(df_orig)\n print df_orig.columns.values\n\n # Make some subsets here:\n print \"Making subsets...\"\n\n # Points passing all experimental constraints chosen\n df_pass_all = subset_pass_constraints(df_orig)\n # df_pass_all = None\n\n # subset with 2m_tau < ma1 < 10\n df_ma1Lt10 = None\n # df_ma1Lt10 = subset_var(df_pass_all, 3.554, 10.5, \"ma1\")\n\n mhmin, mhmax = 122.1, 128.1\n # subset with h1 as h_125\n # df_h1SM = subset_var(df_pass_all, mhmin, mhmax, \"mh1\")\n df_h1SM = None\n\n # subset with h2 as h_125\n # df_h2SM = subset_var(df_pass_all, mhmin, mhmax, \"mh2\")\n df_h2SM = None\n\n n_orig = len(df_orig.index)\n\n def percent_str(numerator, denominator):\n return \"%.3f %% \" % (100*numerator/float(denominator))\n\n print \"Running over\", n_orig, \"points\"\n if isinstance(df_pass_all, pd.DataFrame):\n n_pass_all = len(df_pass_all.index)\n print n_pass_all, \"points passing all constraints (= %s)\" % percent_str(n_pass_all, n_orig)\n # print len(df_ma1Lt10.index), \"of these have 2m_tau < ma1 < 10 GeV (= %s)\" % percent_str(len(df_ma1Lt10.index), n_pass_all)\n # print len(df_h1SM.index), \"points in the h1 = h(125) subset (= %s)\" % percent_str(len(df_h1SM.index), n_pass_all)\n # print len(df_h2SM.index), \"points in the h2 = h(125) subset (= %s)\" % percent_str(len(df_h2SM.index), n_pass_all)\n print \"\"\n\n return df_orig, df_pass_all, df_ma1Lt10, df_h1SM, df_h2SM", "def _load_data() -> UpliftData:\n with project_dir(\"axinova\"):\n data = UpliftData(\n ax_data=load_bin(\"ax_data.feather\"),\n ax_var_struct=load_bin(\"ax_var_struct.feather\"),\n population_codes=load_pickle(\"population_ratios.pkl\"),\n global_codes=load_pickle(\"global_code_ratios.pkl\"),\n station_codes=load_pickle(\"station_code_ratios.pkl\"),\n spr_data=load_pickle(\"spr_data.pkl\"),\n )\n data.all_stations = data.ax_data[\"Station\"].cat.categories.to_list()\n data.all_weekdays = data.ax_data[\"DayOfWeek\"].cat.categories.to_list()\n data.all_timescales = [\"Time\", \"ShortTime\", \"Hour\", \"TimeSlot\"]\n data.var_info = {}\n for (var_id, struct) in data.ax_var_struct.groupby(\"Variable\"):\n data.var_info[var_id] = dict(\n Label=struct[\"Variable_Label\"].max(),\n Codes=struct[\"Label\"].to_list(),\n Order=list(range(len(struct[\"Label_Nr\"].to_list()))),\n )\n data.combi_var = {\n \"md_SexAgeEk\": (\n data.variable_table(\"md_SexAgeEk\")\n .iloc[:, 0]\n .str.split(\"/ \", expand=True)\n .rename(columns={0: \"md_sex\", 1: \"md_agenatrep\", 2: \"md_ek\"})\n )\n }\n return data", "def __processing_forms_and_tables(self, full_df):\n \n tables_dataframes = {}\n\n for table_name in self.metainfo[\"tablas_salida\"]:\n \n logging.info(\"META TABLE: {}\".format(self.metainfo[table_name]))\n # generate a df with specific table sheet\n if self.metainfo[table_name].get(\"columnas\") is not None:\n hoja = self.metainfo[table_name]['hoja']\n table_df = self.generate_table(full_df[\"tabla\"][hoja], self.metainfo[table_name]['columnas'])\n # generate a df with specific cells in the full_df\n elif self.metainfo[table_name].get(\"record\") is not None:\n table_df = self.__generate_record(full_df, 
self.metainfo[table_name])\n\n logging.info(\"TABLE DF AFTER LOAD: \\n {}\".format(table_df))\n\n tables_dataframes[table_name] = table_df\n \n return tables_dataframes", "def _combine_vars(self, obj_types='all', var_keys='all'):\n\n # Retrieve variables\n if 'variables' in self:\n vs = self['variables']\n else:\n return None\n if isinstance(vs, pd.DataFrame):\n return vs # Return df if vs is already a df\n elif isinstance(vs, DataDict) and len(vs.keys()) == 1:\n return list(vs.values())[0] # Return df if vs has only one entry\n elif isinstance(vs, (dict,DataDict)):\n df_dict = dict(vs) # Convert to dict if vs is DataDict\n else:\n raise TypeError(\"DataDict.variables must be of type dict,\"\n \"agentpy.DataDict, or pandas.DataFrame.\")\n\n # Remove dataframes that don't include any of the selected var_keys\n if var_keys != 'all':\n df_dict = {k: v for k, v in df_dict.items()\n if any(x in v.columns for x in make_list(var_keys))}\n\n # Select object types\n if obj_types != 'all':\n df_dict = {k: v for k, v in df_dict.items()\n if k in make_list(obj_types)}\n\n # Add 'obj_id' before 't' for model df\n model_type = self.log['model_type']\n if model_type in list(df_dict.keys()):\n df = df_dict[model_type]\n df['obj_id'] = 0\n indexes = list(df.index.names)\n indexes.insert(-1, 'obj_id')\n df = df.reset_index()\n df = df.set_index(indexes)\n df_dict[model_type] = df\n\n # Return none if empty\n if df_dict == {}:\n return None\n\n # Create dataframe\n df = pd.concat(df_dict) # Dict keys (obj_type) will be added to index\n df.index = df.index.set_names('obj_type', level=0) # Rename new index\n\n # Select var_keys\n if var_keys != 'all':\n # make_list prevents conversion to pd.Series for single value\n df = df[make_list(var_keys)]\n\n return df", "def _convertAndFix(self):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n \r\n if 'SWVT_ROWT' not in self.dataFrames:\r\n self.dataFrames['SWVT_ROWT']=self._constructEmptyDf(['pk','fk','ZEIT','W'])\r\n self.dataFrames['SWVT']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG','INTPOL','ZEITOPTION'])\r\n\r\n if not self.dataFrames['SWVT_ROWT'].empty:\r\n self.dataFrames['SWVT_ROWT'].ZEIT=self.dataFrames['SWVT_ROWT'].ZEIT.str.replace(',', '.')\r\n self.dataFrames['SWVT_ROWT'].W=self.dataFrames['SWVT_ROWT'].W.str.replace(',', '.')\r\n\r\n if 'LFKT_ROWT' not in self.dataFrames:\r\n self.dataFrames['LFKT_ROWT']=self._constructEmptyDf(['pk','fk','ZEIT','LF']) \r\n self.dataFrames['LFKT']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG','INTPOL','ZEITOPTION'])\r\n\r\n if not self.dataFrames['LFKT_ROWT'].empty:\r\n self.dataFrames['LFKT_ROWT'].ZEIT=self.dataFrames['LFKT_ROWT'].ZEIT.str.replace(',', '.')\r\n self.dataFrames['LFKT_ROWT'].LF=self.dataFrames['LFKT_ROWT'].LF.str.replace(',', '.')\r\n\r\n if 'QVAR_ROWT' not in self.dataFrames:\r\n self.dataFrames['QVAR_ROWT']=self._constructEmptyDf(['pk','fk','ZEIT','QM']) \r\n self.dataFrames['QVAR']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG','INTPOL','ZEITOPTION'])\r\n\r\n if not self.dataFrames['QVAR_ROWT'].empty:\r\n self.dataFrames['QVAR_ROWT'].ZEIT=self.dataFrames['QVAR_ROWT'].ZEIT.str.replace(',', '.')\r\n self.dataFrames['QVAR_ROWT'].QM=self.dataFrames['QVAR_ROWT'].QM.str.replace(',', '.')\r\n\r\n if 'PVAR_ROWT' not in self.dataFrames:\r\n self.dataFrames['PVAR_ROWT']=self._constructEmptyDf(['pk','fk','ZEIT','PH']) \r\n 
self.dataFrames['PVAR']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG','INTPOL','ZEITOPTION'])\r\n\r\n if not self.dataFrames['PVAR_ROWT'].empty:\r\n self.dataFrames['PVAR_ROWT'].ZEIT=self.dataFrames['PVAR_ROWT'].ZEIT.str.replace(',', '.')\r\n self.dataFrames['PVAR_ROWT'].PH=self.dataFrames['PVAR_ROWT'].PH.str.replace(',', '.')\r\n\r\n # 1st Time without Value?!\r\n self.dataFrames['SWVT_ROWT']=self.dataFrames['SWVT_ROWT'].fillna(0) \r\n self.dataFrames['LFKT_ROWT']=self.dataFrames['LFKT_ROWT'].fillna(0) \r\n self.dataFrames['QVAR_ROWT']=self.dataFrames['QVAR_ROWT'].fillna(0) \r\n self.dataFrames['PVAR_ROWT']=self.dataFrames['PVAR_ROWT'].fillna(0) \r\n \r\n # Template Node\r\n self.dataFrames['KNOT']=self.dataFrames['KNOT'][self.dataFrames['KNOT'].NAME.fillna('').astype(str).isin(['TemplateNode','TemplNode-VL','TemplNode-RL'])==False] \r\n \r\n # TE only in Heatingmodels ? ...\r\n try:\r\n isinstance(self.dataFrames['KNOT_BZ']['TE'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['KNOT_BZ']['TE']\",'TE only in Heatingmodels?!')) \r\n self.dataFrames['KNOT_BZ']['TE']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # FWVB LFK\r\n if 'FWVB' in self.dataFrames:\r\n try:\r\n isinstance(self.dataFrames['FWVB']['LFK'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['FWVB']['LFK']\",'LFK not set?!')) \r\n self.dataFrames['FWVB']['LFK']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning.\r\n self.dataFrames['FWVB']['LFK'].fillna(value=1,inplace=True)\r\n\r\n # Models with only one Standard LTGR ...\r\n try:\r\n isinstance(self.dataFrames['LTGR']['BESCHREIBUNG'],pd.core.series.Series)\r\n except:\r\n self.dataFrames['LTGR']['BESCHREIBUNG']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # Models with old DTRO_ROWD \r\n for attrib in ['AUSFALLZEIT','PN','REHABILITATION','REPARATUR','WSTEIG','WTIEFE']:\r\n try:\r\n isinstance(self.dataFrames['DTRO_ROWD'][attrib],pd.core.series.Series)\r\n except:\r\n self.dataFrames['DTRO_ROWD'][attrib]=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # Models with no CONTs ...\r\n try:\r\n isinstance(self.dataFrames['CONT']['LFDNR'],pd.core.series.Series)\r\n except:\r\n self.dataFrames['CONT']['LFDNR']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n try:\r\n isinstance(self.dataFrames['CONT']['GRAF'],pd.core.series.Series)\r\n except:\r\n self.dataFrames['CONT']['GRAF']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. 
\r\n\r\n # Models with no PZONs ...\r\n if not 'PZON' in self.dataFrames: \r\n self.dataFrames['PZON']=pd.DataFrame() \r\n self.dataFrames['PZON']['NAME']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n self.dataFrames['PZON']['pk']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # Models with no STOFs ...\r\n if not 'STOF' in self.dataFrames: \r\n # BESCHREIBUNG\r\n self.dataFrames['STOF']=self._constructEmptyDf(['pk','NAME','BESCHREIBUNG']) \r\n\r\n # Models with no GMIXs ...\r\n if not 'GMIX' in self.dataFrames: \r\n self.dataFrames['GMIX']=self._constructEmptyDf(['pk','NAME']) \r\n \r\n # empty WBLZ OBJS-BLOBs\r\n if 'WBLZ' in self.dataFrames.keys():\r\n self.dataFrames['WBLZ']=self.dataFrames['WBLZ'][pd.notnull(self.dataFrames['WBLZ']['OBJS'])] \r\n # empty LAYR OBJS-BLOBs\r\n if 'LAYR' in self.dataFrames.keys():\r\n if 'OBJS' in self.dataFrames['LAYR'].columns:\r\n self.dataFrames['LAYR']=self.dataFrames['LAYR'][pd.notnull(self.dataFrames['LAYR']['OBJS'])] \r\n\r\n # BESCHREIBUNG nicht in RLVG?...\r\n if 'RLVG' in self.dataFrames: \r\n try:\r\n isinstance(self.dataFrames['RLVG']['BESCHREIBUNG'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['RLVG']['BESCHREIBUNG']\",'BESCHREIBUNG nicht in RLVG?...')) \r\n self.dataFrames['RLVG']['BESCHREIBUNG']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # BESCHREIBUNG nicht in RADD?...\r\n if 'RADD' in self.dataFrames: \r\n try:\r\n isinstance(self.dataFrames['RADD']['BESCHREIBUNG'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['RADD']['BESCHREIBUNG']\",'BESCHREIBUNG nicht in RADD?...')) \r\n self.dataFrames['RADD']['BESCHREIBUNG']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n\r\n # RSLW: WMIN/WMAX nicht immer vorhanden? ...\r\n if 'RSLW' in self.dataFrames: \r\n try:\r\n isinstance(self.dataFrames['RSLW']['WMIN'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['RSLW']['WMIN']\",'WMIN nicht vorhanden?!')) \r\n self.dataFrames['RSLW']['WMIN']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. \r\n try:\r\n isinstance(self.dataFrames['RSLW']['WMAX'],pd.core.series.Series)\r\n except:\r\n logger.debug(\"{:s}Error: {:s}: {:s}.\".format(logStr,\"self.dataFrames['RSLW']['WMAX']\",'WMAX nicht vorhanden?!')) \r\n self.dataFrames['RSLW']['WMAX']=pd.Series(dtype='object') # The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning. 
\r\n \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def load_full(self):\n\t\tfor filename in self.FILENAMES:\n\t\t\tself.load(filename)\n\t\tself.reverse_dicts()", "def load_data(self):\n filename = filedialog.askopenfilename(title=\"Select A File\",\n file=((\"csv files\", \"*.csv\"),\n (\"dat files\", \"*.dat\"),\n (\"excel files\", \"*.xlsx\"),\n (\"All Files\", \"*.*\")))\n file_path = filename\n try:\n filename = f\"{file_path}\"\n name = os.path.splitext(os.path.basename(filename))[0]\n if name in ['h2o', 'KED', 'financial']:\n DataLoader.data = load_data(name)\n else:\n DataLoader.data = pd.read_csv(filename)\n except ValueError:\n messagebox.showerror(\"Information\", \"The file you have chosen is invalid.\")\n except FileNotFoundError:\n messagebox.showerror(\"Information\", f\"No such file as {file_path}\")\n self.clear_tree()\n\n self.treeview['columns'] = list(DataLoader.data.columns)\n for i in self.treeview['columns']:\n self.treeview.column(i, anchor=\"w\")\n self.treeview.heading(i, text=i, anchor='w')\n\n for index, row in DataLoader.data.iterrows():\n self.treeview.insert(\"\", 0, text=self.data.shape[0] - 1 - index, values=list(row))\n self.treeview.column('#0', width=100)\n\n self.summary_label = ttk.Label(self, text=f'Data shape: {DataLoader.data.shape}', width=40)\n self.summary_label.grid(row=2, column=0, columnspan=2, sticky=tk.S + tk.N)", "def load_data(self):", "def plot_data(self, data, backup_frame):\n title = self.filename.split('-')\n final_titles = title[2].split('.')\n self.final_title_sub = final_titles[0].lower()\n\n # Accounts for the three types of graph required\n # date for archival purposes\n # web for the web server and\n # log for the logarithmic graphs\n graph_list = ['date', 'web', 'log']\n for mode in graph_list:\n for column in data.columns:\n data['Rest of the World'] = \\\n backup_frame['Global_Cases'] - data[column]\n x_axis = data.index.values\n\n fig, axes = plt.subplots()\n axes.plot(x_axis, data[column], marker='o',\n label=column)\n axes.plot(x_axis, data['Rest of the World'], marker='s',\n label='Rest of the World')\n fig.autofmt_xdate()\n\n every_nth = 4\n for number, label in enumerate(axes.xaxis.get_ticklabels()):\n if number % every_nth != 0:\n label.set_visible(False)\n\n axes.set(xlabel='Date', ylabel='Cases',\n title=f'Covid-19 {self.final_title_sub} '\n f'cases for {column} - data from '\n f'John Hopkins CSSE')\n axes.grid()\n axes.legend()\n\n # Setting the y-axis\n if mode == 'log':\n axes.set_yscale('log')\n else:\n data_max = data.max(axis=1)\n max_number = data_max[-1]\n rounded_max = self.round_up(max_number, -3)\n rounded_max += 2000\n axes.set_ylim([0, rounded_max])\n\n # -----------------------------------------------------\n # Adds Labels to annotate the last data point for each\n # plot\n y_axis1 = data[column][-1]\n y_axis2 = data['Rest of the World'][-1]\n\n plt.annotate(y_axis1, (x_axis[-1], y_axis1 + 500),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=12)\n plt.annotate(y_axis2, (x_axis[-1], y_axis2 + 500),\n bbox=dict(facecolor='red', alpha=0.5),\n fontsize=12)\n # -----------------------------------------------------\n\n # Required in order to stop the column from summing\n # the total of each run through the loop\n # otherwise this leads to Rest of World values in the\n # 
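# --- Editor's illustrative sketch (not part of the corpus entries above/below). ---
# The dataframe-sanitising snippet above repeatedly guards against missing columns with
# try/except and then creates an empty object-dtype Series to silence the pandas
# FutureWarning about empty Series defaulting to float64. A minimal, hypothetical helper
# expressing that pattern (the name ensure_columns is an assumption, not from the source):
import pandas as pd

def ensure_columns(df: pd.DataFrame, columns) -> pd.DataFrame:
    """Add any missing columns as empty object-dtype Series (explicit dtype avoids the
    FutureWarning mentioned in the snippet above)."""
    for col in columns:
        if col not in df.columns:
            df[col] = pd.Series(dtype='object')
    return df

# Example (mirroring the DTRO_ROWD case above):
# ensure_columns(self.dataFrames['DTRO_ROWD'],
#                ['AUSFALLZEIT', 'PN', 'REHABILITATION', 'REPARATUR', 'WSTEIG', 'WTIEFE'])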
millions\n data = data.drop('Rest of the World', axis=1)\n\n if mode == 'log':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'log_' \\\n f'{self.final_title_sub}_for_' \\\n f'{column}.png'\n elif mode == 'date':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{x_axis[-1]}-2020-' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n elif mode == 'web':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n else:\n print('error')\n\n fig.savefig(dir_name, transparent=False, dpi=300,\n bbox_inches=\"tight\")\n\n if os.path.exists(dir_name):\n logging.debug('File saved at: %s', {dir_name})\n print(f'Files saved at:\\n'\n f'{dir_name}\\n')\n else:\n logging.debug('Failed to save')\n logging.debug(os.getcwd())\n plt.close()\n return data", "def load_all_dfs(clf_list = ['test_small','rt_small','test2_small']):\n \n start = time.clock()\n print('loading data')\n first_clf = clf_list[0]\n df = pd.read_csv('Pikki'+first_clf+'.csv')\n df['df'] = first_clf\n\n df = df.set_index(['id','df'])\n\n for clf in clf_list[1:]:\n file_name = 'Pikki' + clf + '.csv'\n df_tmp = pd.read_csv(file_name)\n df_tmp['df'] = clf\n\n df_tmp = df_tmp.set_index(['id','df'])\n\n df = pd.concat([df,df_tmp])\n\n \n df['std'] = df.apply(np.std,axis=1,raw = True)\n end = time.clock()\n print(end-start)\n return df#.swaplevel(0,1)", "def __init__(self, out_dir = 'output' ):\n\n self.data = {} # will contain the data for each different dataset \n self.datasets = '' # will contain the input datasets (original dictionary)\n self.datasets_keys = '' # will contain the input datasets names only (i.e. keys of the datasets dictionary)\n #self.datasets_all = ['era5_2_2'] # all possibly available datasets \n\n self.unique_dates = {} \n self.attributes = {} # will keep the original attributes from the CDM tables, read from the netCDF files \n self.id_string_length = 14 # fixed length for record_id and observation_id values \n self.out_dir = out_dir \n self.variable_types = {}\n self.observation_ids_merged = { 'igra2':b'3' , \n 'ncar':b'4', \n 'bufr':b'5', \n 'era5_1':b'1' , \n 'era5_2':b'2', \n 'era5_1759' :b'6' , \n 'era5_1761':b'7' , \n 'era5_3188' :b'8' } # values used to convert original record_id to the merged record_id, see method merge_all_data \n\n logging.info('*** Initialising the Merging procedure ***' ) \n #self.era5b_columns = [] # stores the columns of the era5fb \n self.standard_cdm = [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type', 'station_configuration_codes'] \n self.slice_size = 3000\n self.index_offset = 0 # will be replaced when running \n self.hour_time_delta = 60 * 60 * 2 # decide up to which time shift records are considered identical \n \n \n self.only_std_plevels = False # set to True to store only standard pressure level data \n self.std_plevs = [1000, 2000, 3000, 5000, 7000, 10000, 15000, 20000, 25000, 30000, 40000, 50000, 70000, 85000, 92500, 100000]", "def load_data(self, X, loss):\n\n self.X = X\n self.tags = pd.DataFrame(loss)\n\n self.index = [_ALL]\n\n self.X_all = pd.concat([self.X_all , self.X], axis = 0, ignore_index=True)\n self.tags_all = pd.concat([self.tags_all, self.tags], axis = 0, ignore_index=True)", "def get_data():\n wga_df = pd.read_csv(os.path.join(MODEL_DIR, 'wga/sculptures/wga_sculpture_periods.csv'), index_col=0)\n wikiart_df = pd.read_csv(os.path.join(MODEL_DIR, 'wikiart/sculptures/wikiart_sculpture_periods.csv'), index_col=0)\n nga_df = pd.read_csv(os.path.join(MODEL_DIR, 
'nga/sculptures/nga_sculpture_periods.csv'), index_col=0)\n\n ######## Fix name for WGA and WikiaRt ###########\n wga_df['Author'] = wga_df.apply(lambda x: fix_name_wga(x['Author']), axis=1)\n wikiart_df['Author'] = wikiart_df.apply(lambda x: fix_name_wiki(x['Author']), axis=1)\n nga_df['Author'] = nga_df.apply(lambda x: fix_name_nga(x['Author']), axis=1)\n\n df = pd.concat([wga_df, wikiart_df, nga_df], ignore_index=True, sort=True)\n\n df['Author_Fixed'] = df.apply(lambda x: fix_text(x['Author']), axis=1)\n df['title_fixed'] = df.apply(lambda x: fix_text(x['title']), axis=1)\n\n periods = [\"BAROQUE\", \"EARLY RENAISSANCE\", \"MEDIEVAL\", \"NEOCLASSICISM\", \"HIGH RENAISSANCE\", \"MINIMALISM\", \"REALISM\",\n \"IMPRESSIONISM\", \"ROCOCO\", \"SURREALISM\", \"MANNERISM\", \"ROMANTICISM\",\n ]\n df['Period'] = df.apply(lambda row: row['Period'].upper(), axis=1)\n\n # Get Desired Periods\n df['Period'] = df.apply(lambda x: \"SURREALISM\" if \"SURREALISM\" in x['Period'] else x['Period'], axis=1)\n df = df[(df['Period'].isin(periods))]\n df = df.sort_values(['Author_Fixed', 'title_fixed'])\n\n #print(\"Combined Drop Rows:\", df.shape[0] - df.drop_duplicates(subset=['Author_Fixed', 'title_fixed']).shape[0])\n\n df = df.drop_duplicates(subset=['Author_Fixed', 'title_fixed'], keep='last')\n\n # Drop Duplicate Sculptures\n df = df[~df['file'].isin(DUP_SCULPTURES)].reset_index(drop=True)\n\n #print(df['Period'].value_counts())\n\n return df", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? 
\n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? \n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def load_data1(self, X, loss):\n self.X_all = X\n self.tags_all = pd.DataFrame(loss)\n self.index = [_ALL]", "def load_all_archived_data(years=default.arch_data_years):\n\tdataframes = []\n\tdrive_arr = []\n\tplay_arr = []\n\tfor year in years:\n\t\tdir_ = os.path.join('data', 'archived_data', str(year))\n\t\tfname = os.path.join(dir_, 'team-game-statistics.csv')\n\t\ttmp_df = pd.read_csv(fname)\n\t\t# Append season\n\t\tseason = [year for _ in range(tmp_df.shape[0])]\n\t\ttmp_df['Season'] = pd.Series(season, index=tmp_df.index)\n\t\tdataframes.append(tmp_df)\n\t\t# Read in plays and 
drives\n\t\tdrive_arr.append(pd.read_csv(os.path.join(dir_, 'drive.csv')))\n\t\tplay_arr.append(pd.read_csv(os.path.join(dir_, 'play.csv')))\n\tall_data = pd.concat(dataframes)\n\tdrives = pd.concat(drive_arr)\n\tplays = pd.concat(play_arr)\n\t# Add dates\n\tdates_raw = [d%1e8 for d in all_data['Game Code']]\n\tdates = [datetime.datetime(year=int(d/1e4), month=int((d/1e2)%1e2), day=int(d%1e2))\n\t\tfor d in dates_raw]\n\tall_data['DateUtc'] = pd.Series(dates, index=all_data.index)\n\t# Add total 1st downs\n\ttot_first_down = (all_data['1st Down Pass'] + \n\t\tall_data['1st Down Rush'] + all_data['1st Down Penalty'])\n\tall_data['1st Downs'] = tot_first_down\n\t# Add conversion pct\n\tthird_down_conv = all_data['Third Down Conv'] / all_data['Third Down Att']\n\tall_data['3rd Down Conv'] = third_down_conv.replace(np.nan, 0.)\n\tfourth_down_conv = all_data['Fourth Down Conv'] / all_data['Fourth Down Att']\n\tall_data['4th Down Conv'] = fourth_down_conv.replace(np.nan, 0.)\n\t# Add special teams / defensive TDs\n\tall_data['DEF TDs'] = all_data['Fum Ret TD'] + all_data['Int Ret TD']\n\tall_data['Special Teams TDs'] = all_data['Kickoff Ret TD'] + all_data['Punt Ret TD']\n\t# Total yards\n\tall_data['Total Yards'] = all_data['Pass Yard'] + all_data['Rush Yard']\n\t# Total drives and plays\n\tnDrives = []\n\tnPlays = []\n\tfor row, game in all_data.iterrows():\n\t\t# Get matching games then matching drives\n\t\tdr_games = drives[drives['Game Code'] == game['Game Code']]\n\t\tpl_games = plays[plays['Game Code'] == game['Game Code']]\n\t\tdr_match = dr_games[dr_games['Team Code'] == game['Team Code']]\n\t\tpl_match = pl_games[pl_games['Offense Team Code'] == game['Team Code']]\n\t\tnDrives.append(dr_match.shape[0])\n\t\tnPlays.append(pl_match.shape[0])\n\tall_data['Total Drives'] = pd.Series(nDrives, index=all_data.index)\n\tall_data['Total Plays'] = pd.Series(nPlays, index=all_data.index)\n\t# Yards per\n\tall_data['Yards Per Pass'] = (all_data['Pass Yard'] / all_data['Pass Att']).replace(np.nan, 0.)\n\tall_data['Yards Per Play'] = (all_data['Total Yards'] / all_data['Total Plays']).replace(np.nan, 0.)\n\tall_data['Yards per Rush'] = (all_data['Rush Yard'] / all_data['Rush Att']).replace(np.nan, 0.)\n\t# Is home\n\thome_codes = (all_data['Game Code'].values / 1e12).astype(int)\n\tall_data['is_home'] = np.array(all_data['Team Code'] == home_codes).astype(int)\n\t# Total turnovers\n\tall_data['Turnovers'] = all_data['Pass Int'] + all_data['Fumble Lost']\n\t# Other (calc later)\n\tall_data['conferenceId'] = 0\n\tfor field in default.this_elo_fields:\n\t\tall_data[field[5:]] = 0\n\t# Rename fields and ids to match new data\n\tall_data = rename_fields(all_data)\n\tall_data = map_team_conf_fields(all_data)\n\tall_data = combine_games(all_data)\n\tall_data = remove_unknown_teams(all_data)\n\treturn all_data", "def load_all_years(save=False): \n df_1 = clean_all_first_half()\n df_1 = df_1[['term_clean', 'crime_clean', 'race_adjusted', 'nativity_no_states', 'Year']]\n df_1.columns = ['term', 'crime', 'race', 'nativity', 'year']\n\n df_2 = clean_all_second_half()\n df_2 = df_2[['term_clean', 'crime_clean', 'race', 'clean_nationality', 'Year']]\n df_2.columns = ['term', 'crime', 'race', 'nativity', 'year']\n df_all = pd.concat([df_1, df_2])\n df_all['max_term'] = df_all['term'].apply(lambda x: max_apply(x))\n df_all['min_term'] = df_all['term'].apply(lambda x: x[0])\n df_all['avg_term'] = df_all['term'].apply(lambda x: avg_term(x))\n df_all['violent'] = df_all['crime'].apply(lambda x: violent_apply(x))\n 
df_all['violent_sexual'] = df_all['crime'].apply(lambda x: violent_sexual_apply(x))\n df_all['moral'] = df_all['crime'].apply(lambda x: moral_apply(x))\n df_all['property'] = df_all['crime'].apply(lambda x: property_apply(x))\n df_all['deceit'] = df_all['crime'].apply(lambda x: deceit_apply(x))\n df_all['uncategorized'] = df_all['crime'].apply(lambda x: uncategorized_apply(x))\n df_all['life_sentence'] = df_all['max_term'].apply(lambda x: life_sentence(x))\n df_all['death_sentence'] = df_all['max_term'].apply(lambda x: death_sentence(x))\n df_all['life_or_death_sentence'] = df_all['max_term'].apply(lambda x: life_or_death_sentence(x))\n df_all['nativity'] = df_all['nativity'].apply(lambda x: fix_nativity(x))\n df_all['foreign'] = df_all['nativity'].apply(lambda x: foreign(x))\n df_all['larceny'] = df_all['crime'].apply(lambda x: larceny(x))\n df_all['burglary'] = df_all['crime'].apply(lambda x: burglary(x))\n df_all['murder'] = df_all['crime'].apply(lambda x: murder(x))\n df_all['robbery'] = df_all['crime'].apply(lambda x: robbery(x))\n df_all['forgery'] = df_all['crime'].apply(lambda x: forgery(x))\n df_all['assault'] = df_all['crime'].apply(lambda x: assault(x))\n df_all['all_larceny'] = df_all['crime'].apply(lambda x: all_larceny(x))\n df_all['all_manslaughter'] = df_all['crime'].apply(lambda x: all_manslaughter(x))\n df_all['foreign_and_race_combined'] = df_all.apply(lambda row: nativity_race_combined(row), axis =1)\n df_all['nativity_race_with_countries'] = df_all.apply(lambda row: nativity_race_with_countries(row), axis =1)\n\n\n\n if save:\n df_all.to_csv('../../data/confined_data_final.csv')\n else:\n return df_all", "def _mangle_loaded_res(self, res): # pylint: disable=no-self-use, unused-argument\n\n # Update columns lists in case some of the columns were removed from the loaded dataframe.\n for name in (\"_stats_colnames\", \"xaxes\", \"yaxes\", \"hist\", \"chist\"):\n colnames = []\n for colname in getattr(self, name):\n if colname in res.df:\n colnames.append(colname)\n setattr(self, name, colnames)\n\n for name in (\"_hov_colnames\", ):\n colnames = []\n val = getattr(self, name)\n for colname in val[res.reportid]:\n if colname in res.df:\n colnames.append(colname)\n val[res.reportid] = colnames\n return res.df", "def load_data(self) -> None:", "def opendatasets(self):\n\n print \"Open datasets\"\n self.combo_dataset_list.clear()\n self.combo_variable_list.clear()\n self.combo_wms_time_first_d.clear()\n self.combo_wms_time_first_h.clear()\n self.combo_wms_time_last_d.clear()\n self.combo_wms_time_last_h.clear()\n self.combo_wms_time_first_d_2.clear()\n self.combo_wms_time_first_h_2.clear()\n self.combo_wms_time_last_d_2.clear()\n self.combo_wms_time_last_h_2.clear()\n self.combo_wms_layer_depth.clear()\n self.combo_wms_layer_depth_2.clear()\n self.combo_wms_layer_depth_max_2.clear()\n self.combo_colorbar.clear()\n self.combo_proj.clear()\n product=str(self.combo_product_list.currentText())\n for key in self.dict_prod[product].keys():\n print \"Variable\"\n self.combo_dataset_list.addItem(str(key))\n self.combo_variable_list.setEnabled(True)", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=5, exog_idx=[10, 2, 6, 7, 8])", "def read_fx_data(self):\n\n dirStr = self.dir\n\n formatSpec1 = '%Y-%m-%d %H:%M:%S'\n formatSpec2 = '%m/%d/%Y %H:%M'\n\n dirN = os.fsencode(dirStr)\n data = []\n labels = {}\n fileIdx = 0\n\n for file in os.listdir(dirN):\n filename = os.fsdecode(file)\n if filename.endswith('.csv'):\n try:\n fileData, label = 
self.read_fx_data_from_file(os.path.join(dirStr, filename), formatSpec=formatSpec1)\n except:\n fileData, label = self.read_fx_data_from_file(os.path.join(dirStr, filename), formatSpec=formatSpec2)\n\n labels[fileIdx] = label\n fileIdx += 1\n data.append(fileData)\n\n # Drop columns where not all data are present\n scatData = pd.concat([df['Close'] for df in data], axis=1)\n for df in data:\n df.drop(scatData.index[scatData.isnull().any(1).nonzero()[0]], errors='ignore', inplace=True)\n\n return data, labels", "def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. 
Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0", "def dataframe():\n\t#allows function to access station, gmt, and miss_station functions\n global stations\n\tglobal gmt\n\tglobal miss_station\n\t\n\t#read predictor file\n\tcontrol = cfg.read_yaml('../registry/graphs.yaml')\n\tpred_ctrl = cfg.read_yaml(cfg.get_config_path(control.pred_file))\n\tpredd_ctrl = cfg.read_yaml(cfg.get_config_path(control.predd_file))\n\n\t#get file paths and update database\n\tpredictor_file_path = control.predictor_file_path\n\tpredictand_file_path = control.predictand_file_path\n\tpred_file_id = update(predictor_file_path)\n\tpredd_file_id = update(predictand_file_path)\n\t\n\t#store lead time and date range\n\tlead_time = control.lead_time\n\tdate_range = control.date_range\n\n\t#get info for fetch many dates\n\tstart,end,stride = read_pred.parse_range(date_range)\n\tfcst_ref_time = control.date_range[0].split('-')[0][-2:]\n\t\n\t#initialize list of predictors\n\tpred_list = pred_ctrl.predictors\n\tpredictor = []\n\n\t#loops through predictors to build camps data objects\n\tfor entry_dict in pred_list:\n\t\t#formats metadata\n\t\tpred = create.preprocess_entries(entry_dict, fcst_ref_time)\n\t\t\n\t\t#adds info to metadata that's not currently being stored\n\t\tpred.search_metadata['reserved2'] = lead_time*3600\n 
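# --- Editor's illustrative sketch (not part of the corpus entries above/below). ---
# The merging routine above assembles 'recordindex' by accumulating the per-record lengths
# collected in merged_indices ([0, len0, len0+len1, ...]). An assumed-equivalent way to
# produce the same cumulative offsets with NumPy (variable names here are hypothetical):
import numpy as np

lengths = [3, 5, 2]                                    # e.g. rows per merged record
record_index = np.concatenate(([0], np.cumsum(lengths)))
# -> array([ 0,  3,  8, 10]): the start offset of each record plus the total length,
#    matching the sequence built by the mi.append(mi[ind] + i) loop in the snippet above.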
pred.search_metadata['file_id'] = pred_file_id\n\t\tpred.search_metadata['reserved1'] = 'vector'\n\n\t\t#build camps data objects for each day\n\t\tvariable = fetch_many_dates(predictor_file_path,start,end,stride,pred.search_metadata)\n\t\t\n\t\t#appends all data to single camps object\n\t\tif variable[0] is not None:\n\t\t\tvar = variable[0]\n\t\t\tarrs = []\n\t\t\tfor i in range(len(variable)):\n\t\t\t\tarrs.append(variable[i].data)\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictor.append(var)\n\n\t#initializes list of predictands\n\tpredd_list = predd_ctrl.predictands\n predictand = []\n\t\n\t#loops through predictands to build camps data objects\n for entry_dict in predd_list:\n\t\t#formats metadata\n \tvertical_coordinate = entry_dict.pop('Vertical_Coordinate')\n\t\tentry_dict['file_id'] = predd_file_id\n\n\t\t#build camps objects for each day\n variable = fetch_many_dates(predictand_file_path,start, end, stride, entry_dict)\n\n\t\t#append all data to single camps object\n var = variable[0]\n arrs = []\n for i in range(len(variable)):\n arrs.append(variable[i].data)\n try:\n\t\t\tvar.data = np.stack(arrs)\n\t\t\tpredictand.append(var)\n\t\texcept:\n\t\t\tprint(\"Can't read \" + variable.name)\n\n\t#getting predictor station and time data\n\tpredr = Dataset(predictor_file_path[0])\n\tpredr_stat = predr.variables['station'][:]\n\tif lead_time == 3:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant'][:]\n\telif lead_time == 6:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant1'][:]\n\telif lead_time == 12:\n\t\tpredr_time = predr.variables['OM__phenomenonTimeInstant2'][:]\n\tpredr.close()\n\n\t#reformatting predictor station and time data\n\tpredr_stations = stations(predr_stat)\n\tpredr_gmt = gmt(predr_time)\n\t\n\t#getting predictand station and time data\n\tpredd = Dataset(predictand_file_path[0])\n\tpredd_stat = predd.variables['station'][:]\n\tpredd_time = predd.variables['OM__resultTime'][:]\n\tpredd.close()\n\t\n\t#reformatting predictand station and time data\n\tpredd_stations = stations(predd_stat)\n\tpredd_gmt = gmt(predd_time)\n\n\t#choosing predictand observations that line up with predictor time\n\thour = (predictor[0].metadata['FcstTime_hour']/3600) + lead_time\n\tdays = len(predd_gmt)/24\n\tpredd_hours = [0]*days\n k=0\n for i in range(len(predd_gmt)):\n if i%24 == hour:\n\t\t\tpredd_hours[k]=predd_gmt[i]\n\t\t\tk+=1\n\t\n\t#catches when GFS data doesn't cover the last day of the month\n\tif len(predr_gmt) < len(predd_hours):\n\t\tpredd_hours = predd_hours[:-1]\t\n\t\n\t#find missing stations\n\tmiss_stations = miss_station(predr_stations,predd_stations)\n\tstations = predd_stations\n\t\n\t#station and time array\n\tinfo = [['',''] for k in range(len(predr_gmt)*len(stations))]\n\tfor i in range(len(predr_gmt)):\n\t\tfor j in range(len(stations)):\n\t\t\tk = i*len(stations)+j\n\t\t\tinfo[k][0]=predr_gmt[i]\n\t\t\tinfo[k][1]=stations[j]\n\n\t#create column names\n\tnames = ['']*(len(predictor)+len(predictand)+2)\n\tnames[0]='Time'\n\tnames[1]='Station'\n\n\t#creating array\n\tarr = np.zeros((len(stations)*len(predr_gmt),len(predictor)+len(predictand)))\n\t\n\t#adding predictor data\n\tfor i in range(len(predictor)):\n\t\t#remove lead time and forecast reference time from variable name\n\t\t#and add variable name to column list of final dataframe\n\t\tif lead_time == 12:\n\t\t\tnames[i+2]='GFS_'+predictor[i].get_variable_name()[:-11]\n\t\telse:\n\t\t\t names[i+2]='GFS_'+predictor[i].get_variable_name()[:-10]\n\n\t\t#create pandas dataframe of data and 
sort alphabetically by station name\n\t\tpredictor[i].data = np.squeeze(predictor[i].data,axis=2)\n\t\tpredictor[i].data = pd.DataFrame(predictor[i].data,columns=predr_stations,index=predr_gmt)\n\t\tpredictor[i].data = predictor[i].data.reindex(sorted(predictor[i].data.columns),axis=1)\n\t\t\n\t\t#remove stations with no predictand data\n\t\tk=0\n\t\ta=miss_stations[:]\n\t\tfor j in predictor[i].data.columns:\n\t\t\tif not a:\n\t\t\t\tbreak\n\t\t\tif j==a[k]:\n\t\t\t\tpredictor[i].data=predictor[i].data.drop(j,axis=1)\n\t\t\t\tdel a[k]\n\t\t\n\t\t#add data to final dataframe\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tarr[k][i] = predictor[i].data.iloc[b][c]\n\n\t#add predictand data\n\tfor i in range(len(predictand)):\n\t\t#removing extra underscore, adding variable name to column names\n\t\tnames[len(predictor)+2+i]='METAR_'+predictand[i].get_variable_name()[:-1]\n\t\n\t\t#resize array and create pandas dataframe\n\t\tpredictand[i].data = np.squeeze(predictand[i].data,axis=2)\n\t\tpredictand[i].data = pd.DataFrame(predictand[i].data,columns=predd_stations,index=predd_hours)\n\t\tpredictand[i].data = predictand[i].data.reindex(sorted(predictand[i].data.columns),axis=1)\n\t\t\n\t\t#remove extra days of predictand data\n\t\tpredictand[i].data = predictand[i].data.iloc[0:len(predr_time),:]\n\t\t\t\n\t\t#add predictand data to array\n\t\tfor b in range(len(predr_gmt)):\n\t\t\tfor c in range(len(stations)):\n\t\t\t\tk = b*len(stations)+c\n\t\t\t\tval = predictand[i].data.iloc[b][c]\n\t\t\t\t\n\t\t\t\t#catch metar fill data\n\t\t\t\tif val == 9999: \n\t\t\t\t\tval = np.nan\n\t\t\t\tarr[k][len(predictor)+i]=val\n\t\n\t#add station and time data to array and save as csv\n\tdata = np.concatenate([info,arr],axis = 1)\n\tto_save = pd.DataFrame(data,columns=names)\n\tto_save.to_csv(str(start)+'_'+str(end)+'_'+str(lead_time)+'hrs.csv')", "def data_visualization_general(data):\n path_to_save = str(Path(__file__).parent.parent) + '/jupyter_notebook/'\n key = list(data.keys())[0] # we do this way because keys() return a dict-keys which is not subscriptable\n df = pd.DataFrame(data[key])\n file_name = key + \".csv\"\n df.to_csv(os.path.join(path_to_save, file_name))\n # we save it in the jupyter_notebook folder so that it will be easier to show data on jupyter notebook", "def get_data(self):\n\n all_data = OrderedDict()\n projects = [Path(proj) for proj in glob(str(self.data_path.joinpath(\"*\"))) if Path(proj).is_dir()]\n\n for project in projects:\n files = []\n \n # Read all csv files and save them as a list in files\n for ver in glob(str(project.joinpath(\"*.csv\"))):\n files.extend(pd.read_csv(ver, usecols=['time', 'buggy']).values.tolist())\n \n # Create a pandas dataframe from the csv sorted by datetime\n df = pd.DataFrame(files, columns=['Time', 'Bugs']).sort_values(by='Time').reset_index(drop=True)\n \n # Convert time to Pandas DateTime format\n df['Time'] = pd.to_datetime(df['Time']) \n \n # Group bug counts by week starting on monday\n df = df.reset_index().set_index('Time').groupby(\n [pd.Grouper(freq='W-MON')])[\"Bugs\"].sum().astype(int).reset_index()\n \n df = df.set_index('Time')\n # Save the data to dictionary\n all_data.update(OrderedDict({project.name: df}))\n\n return all_data", "def Dbase_to_DF(self):\n for item in sorted(self.dbase.keys()):\n self.dataFRAME[item]=self.dbase[item]\n return self.dataFRAME", "def load(fnames, tag='', inst_id=''):\n\n # Save each file to the output DataFrame\n data = 
load_csv_data(fnames, read_csv_kwargs={'index_col': 0,\n 'parse_dates': True})\n\n # Assign the meta data\n meta, status_desc = mm_ace.common_metadata()\n flux_desc = '5-min averaged Differential '\n\n meta['status_e'] = {meta.labels.units: '',\n meta.labels.name: 'Diff e- Flux Status',\n meta.labels.notes: '',\n meta.labels.desc: status_desc,\n meta.labels.fill_val: np.nan,\n meta.labels.min_val: 0,\n meta.labels.max_val: 9}\n meta['status_p'] = {meta.labels.units: '',\n meta.labels.name: 'Diff Proton Flux Status',\n meta.labels.notes: '',\n meta.labels.desc: status_desc,\n meta.labels.fill_val: np.nan,\n meta.labels.min_val: 0,\n meta.labels.max_val: 9}\n meta['anis_ind'] = {meta.labels.units: '',\n meta.labels.name: 'Anisotropy Index',\n meta.labels.notes: '',\n meta.labels.desc: 'Range: 0.0 - 2.0',\n meta.labels.fill_val: -1.0,\n meta.labels.min_val: 0.0,\n meta.labels.max_val: 2.0}\n meta['eflux_38-53'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff e- Flux 38-53 eV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Electron Flux between 35-53 eV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['eflux_175-315'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff e- Flux 175-315 eV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Electron Flux between 175-315 eV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_47-68'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff Proton Flux 47-68 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 47-68 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_115-195'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff Proton Flux 115-195 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 115-195 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_310-580'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff Proton Flux 310-580 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 310-580 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_795-1193'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name: 'Diff Proton Flux 795-1193 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 795-1193 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n meta['pflux_1060-1900'] = {meta.labels.units: 'particles/cm2-s-ster-MeV',\n meta.labels.name:\n 'Diff Proton Flux 1060-1900 keV',\n meta.labels.notes: '',\n meta.labels.desc:\n ''.join([flux_desc,\n 'Proton Flux between 1060-1900 keV']),\n meta.labels.fill_val: -1.0e5,\n meta.labels.min_val: -np.inf,\n meta.labels.max_val: np.inf}\n return data, meta", "def WriteDataFrames(self, Outpath):\n\n newdataframes = self.newdataframes\n for staname in newdataframes.keys():\n fname = staname + '.TXT'\n newdataframes[staname].to_csv(Outpath + fname, float_format=\"%.2f\")\n print('--------------------')\n print('Writing dataframe')\n print('--------------------')", "def load_data(self):\n self.tif_file = 
self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def _format_variables(df: EDAFrame, cfg: Config, data: Dict[str, Any]) -> Dict[str, Any]:\n res: Dict[str, Any] = {}\n # variables\n if not cfg.variables.enable:\n res[\"has_variables\"] = False\n return res\n\n res[\"variables\"] = {}\n res[\"has_variables\"] = True\n for col in df.columns:\n try:\n stats: Any = None # needed for pylint\n dtp = df.get_eda_dtype(col)\n tab_names: List[str] = []\n if isinstance(dtp, Continuous):\n itmdt = Intermediate(col=col, data=data[col], visual_type=\"numerical_column\")\n stats = format_num_stats(data[col])\n tab_names = [\"Stats\", \"Histogram\", \"KDE Plot\", \"Normal Q-Q Plot\"]\n elif type(dtp) in [Nominal, SmallCardNum, GeoGraphy, GeoPoint]:\n itmdt = Intermediate(col=col, data=data[col], visual_type=\"categorical_column\")\n stats = format_cat_stats(\n data[col][\"stats\"], data[col][\"len_stats\"], data[col][\"letter_stats\"]\n )\n tab_names = [\"Stats\", \"Word Length\", \"Pie Chart\", \"Word Cloud\", \"Word Frequency\"]\n elif isinstance(dtp, DateTime):\n itmdt = Intermediate(\n col=col,\n data=data[col][\"stats\"],\n line=data[col][\"line\"],\n visual_type=\"datetime_column\",\n )\n stats = stats_viz_dt(data[col][\"stats\"])\n else:\n raise RuntimeError(f\"the type of column {col} is unknown: {type(dtp)}\")\n\n rndrd = render(itmdt, cfg)\n layout = rndrd[\"layout\"]\n figs_var: List[Figure] = []\n for tab in layout:\n try:\n fig = tab.children[0]\n except AttributeError:\n fig = tab\n # fig.title = Title(text=tab.title, align=\"center\")\n figs_var.append(fig)\n comp = components(figs_var)\n\n res[\"variables\"][col] = {\n \"tabledata\": stats,\n \"col_type\": itmdt.visual_type.replace(\"_column\", \"\"),\n \"tab_names\": tab_names,\n \"plots\": comp,\n }\n\n except:\n print(f\"error happended in column:{col}\", file=sys.stderr)\n raise\n\n return res", "def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n # framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)", "def D_Base_to_Exel(self):\n# for item in sorted(self.dbase.keys()): # for every key/cell add to a dataFRAME\n# self.dataFRAME[item]=self.dbase[item]\n \n self.dataFRAME = self.Dbase_to_DF()\n writer = ExcelWriter(self.path+'/ALLwells'+ self.filetype) # assign a path for the file\n self.dataFRAME.to_excel(writer, 'Sheet1') # create a file in the same path the original files came from\n writer.save()", "def get_full_df(self):\n\n galaxies = []\n for i, gal_name in 
enumerate(self.filenames):\n g_df = self.galaxies[gal_name].all_particle_properties(\n ).to_pandas()\n g_df['name'] = self.names[i]\n g_df['snap'] = self.snaps[i]\n galaxies.append(g_df)\n return pd.concat(galaxies)", "def __init__(self, animals_data=\"animals.json\", food_data=\"food.json\", zookeeper_data=\"zookeeper.json\"):\n if type(animals_data) is pd.DataFrame:\n self.animals_df = animals_data\n else:\n self.animals_df = pd.read_json(animals_data)\n\n if type(food_data) is pd.DataFrame:\n self.food_df = food_data\n else:\n self.food_df = pd.read_json(food_data)\n\n if type(zookeeper_data) is pd.DataFrame:\n self.zookeeper_df = zookeeper_data\n else:\n self.zookeeper_df = pd.read_json(zookeeper_data)", "def _load_timeseries(self, var, fix_times=False, master=False, preprocess=None,\n load_kws={}, **case_kws):\n\n is_var = not isinstance(var, basestring)\n if is_var:\n field = var.varname\n is_var = True\n else:\n field = var\n\n if case_kws:\n # Load/return a single case\n prefix = self.case_prefix(**case_kws)\n suffix = self.case_suffix(**case_kws)\n\n path_to_file = os.path.join(\n self.data_dir,\n self.case_path(**case_kws),\n prefix + field + suffix,\n )\n logger.debug(\"{} - loading {} timeseries from {}\".format(\n self.name, field, path_to_file\n ))\n ds = load_variable(field, path_to_file, fix_times=fix_times, **load_kws)\n\n if preprocess is not None:\n ds = preprocess(ds, **case_kws)\n\n return ds\n else:\n\n data = dict()\n\n for case_kws, filename in self.walk_files(field):\n\n try:\n ds = load_variable(field, filename, fix_times=fix_times, **load_kws)\n\n if preprocess is not None:\n ds = preprocess(ds, **case_kws)\n\n data[self.case_tuple(**case_kws)] = ds\n except:\n logger.warn(\"Could not load case %r\" % case_kws)\n data[self.case_tuple(**case_kws)] = xr.Dataset({field: np.nan})\n\n if is_var:\n var._data = data\n var._loaded = True\n\n if master:\n ds_master = create_master(self, field, data)\n\n if is_var:\n var.master = ds_master\n\n data = ds_master\n\n return data", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def _format_meta_pre_merge(self):\n self.__col_name_map = {\n ColNameFormatter.fmt(c): c\n for c in self.data.solar_meta.columns.values\n }\n\n self._rename_cols(self.data.solar_meta, prefix=SOLAR_PREFIX)\n self._rename_cols(self.data.wind_meta, prefix=WIND_PREFIX)\n\n self._save_rep_prof_index_internally()", "def read_dfdict_data(datadir, subset=None):\n print('Reading datasets...')\n # Initialize dict to store all dataframes\n dfdict = {}\n\n # If subset of datasets are given, read only those\n if subset is not None:\n with open(subset, 'r') as f:\n datasetids = f.read().splitlines()\n else:\n datasetids = get_dataset_ids(datadir)\n\n # Read each dataset and convert to relative abundance\n for dataset in datasetids:\n print(dataset),\n ## Read dataset\n df, meta = read_dataset_files(dataset, datadir)\n df = raw2abun(df)\n\n ## Get case and control samples\n classes_list = get_classes(meta)\n if len(classes_list[0]) == 0 or len(classes_list[1]) == 0:\n raise ValueError('Something wrong with ' + dataset + ' metadata.')\n H_smpls, dis_smpls = get_samples(meta, classes_list)\n\n dfdict.update({dataset: {'df': df, 'meta': meta, 'dis_smpls': dis_smpls, 'H_smpls': H_smpls, 'classes': classes_list}})\n print('\\nReading datasets... 
Finished.')\n return dfdict", "def load(self):\n hdf_filename = os.path.join(self._dump_dirname, 'result.h5')\n if os.path.isfile(hdf_filename):\n store = pd.HDFStore(hdf_filename, mode='r')\n keys = store.keys()\n if keys == ['/df']:\n self.set_result(store['df'])\n else:\n if set(keys) == set(map(lambda i: '/%s' % i, range(len(keys)))):\n # keys are not necessarily ordered\n self.set_result([store[str(k)] for k in range(len(keys))])\n else:\n self.set_result({k[1:]: store[k] for k in keys})\n\n else:\n self.set_result(joblib.load(\n os.path.join(self._output_dirname, 'dump', 'result.pkl')))", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=0)", "def organise_baseline_data(self):\n self.baseline_data = {}\n for injkey in self.data_sets.keys():\n data = {}\n baseline_result = self.data_sets[injkey].pop('full_syst_baseline')\n datakey = baseline_result.keys()[0]\n baseline_data = self.systtest_fit_extract(\n fit_data=baseline_result[datakey],\n datakey=datakey,\n labels=self.labels[injkey]['full_syst_baseline'].dict\n )\n self.baseline_data[injkey] = baseline_data", "def __load_factors(self):\n\t\tin_path = self.dir_base / self[\"files\"][\"factors\"]\n\t\tlog.info(\"Loading factors from %s\" % in_path)\n\t\t(W,H,doc_ids,terms) = load_nmf_factors(in_path)\n\t\tcolumns = np.arange(1, self[\"k\"]+1, dtype=int)\n\t\tself.document_associations = pd.DataFrame(W, index = doc_ids, columns = columns)\n\t\tself.term_associations = pd.DataFrame(np.transpose(H), index = terms, columns = columns)", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def write_merged_file(self):\n \n #out_name = os.getcwd() + '/FAST_INDEX_merged_' + [ x for x in self.datasets[ list(self.datasets_keys)[0]].split('/') if '.nc' in x ] [0] \n \n \"\"\" Loading the econding of variables created from the harvester script \"\"\"\n encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n \n if not os.path.isdir(self.out_dir):\n 
Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n \n logging.info('Writing the observations_tables to the netCDF output via xarray to_netcdf() ')\n #obs_tab = self.MergedObs[ ['date_time' , 'latitude', 'longitude' , 'observation_value' , 'observed_variable' , 'source_id' , 'observation_id', 'z_coordinate' ] ] # including only some columns \n obs_tab = self.MergedObs # including only some columns \n obs_tab = self.add_cdm_missing_columns(obs_tab) \n \n \"\"\" \n # Old using xarray\n obs_tab = obs_tab.to_xarray() \n for v in obs_tab.variables:\n if v == \"index\" or v == \"hdrlen\" or 'string' in v:\n continue\n obs_tab[v].attrs['external_table'] = self.attributes['observations_table'][v]['external_table']\n obs_tab[v].attrs['description'] = self.attributes['observations_table'][v]['description']\n \"\"\"\n\n for k in obs_tab.columns:\n print('Writing the observation table using h5py new method for the variable: ' , k )\n df = obs_tab[ [k] ] # making a 1 column dataframe \n write_dict_h5(out_name, df, k, encodings['observations_table'], var_selection=[], mode='a', attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')})\n \n #obs_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='w' , group = 'observations_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the header_table to the netCDF output via xarray ')\n head_tab = self.MergedHead.to_xarray()\n for v in head_tab.variables: \n if v == \"index\" or v == \"hdrlen\" or v == \"string80\":\n continue\n head_tab[v].attrs['external_table'] = self.attributes['header_table'][v]['external_table']\n head_tab[v].attrs['description'] = self.attributes['header_table'][v]['description']\n \n head_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = 'header_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the station_configuration and source_configurations tables to the netCDF output via xarray ') \n for k in self.data.keys():\n if k == 'cdm_tables':\n continue \n group_name = k + '_station_configuration'\n sc = self.data[k]['station_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n group_name = k + '_source_configuration'\n sc = self.data[k]['source_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n \"\"\" To be fixed ! 
\"\"\"\n #group_name = k + '_source_configuration'\n #sc = self.data[k]['source_configuration'][:1].to_xarray()\n #sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name ) \n \n logging.info('Writing the merged record indices to the netCDF output ') \n di = self.MergedRecordIndex\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n \n logging.info('Writing the merged feedback to the netCDF output ') \n group_name = 'era5fb' \n di = self.MergedFeedback\n di = di.to_xarray()\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n logging.info('Writing the standard cdm tables to the netCDF output ') \n for t in self.data['cdm_tables'].keys(): \n d = self.data['cdm_tables'][t]\n d.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = t )\n \n logging.info('*** Done writing the output netCDF file ')", "def get_datasets(load_key=None, maven=False):\n ds_names = {}\n if load_key == 'R2349': \n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_names['batsrus_multi_species'] = model_dir+'R2349/batsrus_3d_multi_species.h5'\n ds_names['batsrus_electron_pressure'] = model_dir+'R2349/batsrus_3d_pe.h5'\n ds_names['heliosares'] ='/Volumes/triton/Data/ModelChallenge/R2349/heliosares_multi.h5'\n #ds_names['rhybrid'] ='/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5'\n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'multi_fluid' in key],\n 'batsrus2':[key for key in ds_names.keys() if 'multi_species' in key],\n 'batsrus3':[key for key in ds_names.keys() if 'electron_pressure' in key],\n 'batsrus4':[key for key in ds_names.keys() if 'mf_lr' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key],\n 'rhybrid_helio':[key for key in ds_names.keys() if 'rhybrid' in key ]}\n if maven or True:\n ds_names['maven']=orbit_dir+'orbit_2349.csv'\n #ds_names['maven'] = orbit_dir+'orbit_plume_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'batsrus_mf_lowres':\n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_types = {'batsrus_mf_lr' : ['batsrus_mf_lr']}\n\n\n elif load_key == 'helio_multi':\n ds_names['t00550'] = model_dir+'R2349/Heliosares_Multi/t00550.h5'\n ds_names['t00560'] = model_dir+'R2349/Heliosares_Multi/t00560.h5'\n ds_names['t00570'] = model_dir+'R2349/Heliosares_Multi/t00570.h5'\n ds_names['t00580'] = model_dir+'R2349/Heliosares_Multi/t00580.h5'\n ds_names['t00590'] = model_dir+'R2349/Heliosares_Multi/t00590.h5'\n ds_names['t00600'] = model_dir+'R2349/Heliosares_Multi/t00600.h5'\n ds_names['t00610'] = model_dir+'R2349/Heliosares_Multi/t00610.h5'\n ds_names['t00620'] = model_dir+'R2349/Heliosares_Multi/t00620.h5'\n ds_names['t00630'] = model_dir+'R2349/Heliosares_Multi/t00630.h5'\n ds_names['t00640'] = model_dir+'R2349/Heliosares_Multi/t00640.h5'\n ds_names['t00650'] = model_dir+'R2349/Heliosares_Multi/t00650.h5'\n\n ds_types = {'heliosares':[key for key in ds_names.keys()]}\n if maven:\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'SDC_BATS':\n ds_names['LS180_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_max.h5'\n ds_names['LS270_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_max.h5'\n ds_names['LS090_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_max.h5'\n ds_names['LS180_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_max.h5'\n ds_names['LS270_SSL270_max'] = 
model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_max.h5'\n ds_names['LS090_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_max.h5'\n ds_names['LS180_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_max.h5'\n ds_names['LS270_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_max.h5'\n ds_names['LS090_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_max.h5'\n ds_names['LS180_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_min.h5'\n ds_names['LS270_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_min.h5'\n ds_names['LS090_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_min.h5'\n ds_names['LS180_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_min.h5'\n ds_names['LS270_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_min.h5'\n ds_names['LS090_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_min.h5'\n ds_names['LS180_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_min.h5'\n ds_names['LS270_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_min.h5'\n ds_names['LS090_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_min.h5'\n\n ds_types = {'batsrus':[key for key in ds_names.keys()]}\n\n elif load_key == 'SDC_G1':\n #BATSRUS\n ds_names['bats_min_LS270_SSL0'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG0.h5'\n ds_names['bats_min_LS270_SSL180'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG180.h5'\n ds_names['bats_min_LS270_SSL270'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG270.h5' \n \n #HELIOSARES\n #ds_names['helio_1'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_1.h5'\n \n #ds_names['helio_2'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_2.h5'\n \n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'bats' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key]}\n if maven:\n pass\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n #ds_types['maven']=['maven']\n\n elif load_key == 'rhybrid_res':\n ds_names = {'rhybrid240':'/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5',\n 'rhybrid120':'/Volumes/triton/Data/ModelChallenge/R2349/HYB/state00030000.h5'}\n ds_types = {'rhybrid1':['rhybrid240'], 'rhybrid2':['rhybrid120']}\n elif load_key == 'batsrus_tseries':\n ds_names = {'batsrus_mf':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_mf/3d__ful_4_n00040000.h5',\n 'batsrus_ms':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_ms/3d__mhd_6_n0050000.h5'}\n ds_types = {'batsrus_mf':['batsrus_mf'], 'batsrus_ms':['batsrus_ms']}\n\n elif load_key == 'maven':\n ds_names, ds_types = {},{}\n ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'exo_2349':\n keys = ['2349_1RM_225km','2349_1RM_450km', '2349_2RM_450km',\n '2349_2RM_900km','2349_4RM_900km'] \n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonA':\n keys = ['2349_1RM_225km', '2349_2RM_450km',\n '2349_1.5RM_338km'] \n ds_names = {k:exo_dir+'/ComparisonA/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonB':\n keys = ['2349_1RM_225km', 'T0_1RM_225km', 'T1_1RM_225km', \"T2_1RM_225km\"] \n ds_names = {k:exo_dir+'/ComparisonB/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n elif load_key == 'exo_t1':\n keys = ['T1_1RM_112km', 'T1_1RM_225km', #'T1_1RM_450km',\n 'T1_2RM_225km', 'T1_2RM_450km', 
#'T1_2RM_900km',\n 'T1_4RM_900km']\n\n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n else:\n print('No datasets selected')\n \n\n return (ds_names, ds_types)", "def process_data(self):\n logging.debug('process_data called')\n\n pd_time_series = pd.read_csv(f'{self.out_dir}docs/downloaded/'\n f'{self.filename}')\n\n pd_time_series = pd_time_series.drop('Lat', axis=1)\n pd_time_series = pd_time_series.drop('Long', axis=1)\n no_of_dates = len(pd_time_series.columns) - 2\n dateindex = pd.date_range(start='1-22-2020',\n periods=no_of_dates,\n freq='D').strftime('%d-%m')\n\n new_cols = ['Province/State', 'Country/Region']\n for index in dateindex:\n new_cols.append(index)\n pd_time_series.columns = new_cols\n\n pd_time_series = pd_time_series.drop('Province/State', axis=1)\n pd_edit_series = pd_time_series.set_index('Country/Region')\n\n pd_edit_series = pd_edit_series.T\n\n return pd_edit_series", "def load_data(self):\n self.data = pd.read_csv(self.data_path, dtype=self.dtype)\n self.data.columns = self.data_cols\n self.data.topic = self.data.topic.str.lower()\n logging.debug(f'Data Load Complete: {self.data_path}')", "def _generate_datasets(self):\n datasets = list()\n for fname in sorted(os.listdir(self.base_dir)):\n if not self._filename_re.match(fname):\n continue\n\n file_path = os.path.join(self.base_dir, fname)\n try:\n fh = self._open_hdf5(file_path)\n\n except (IOError, OSError) as e:\n warnings.warn('Cannot access {}; skipped'.format(file_path))\n print(e)\n continue\n\n for key in fh:\n if self._groupname_re.match(key.lstrip('/')):\n datasets.append(ObjectTableWrapper(fh, key, self._schema))\n continue\n\n warn_msg = 'incorrect group name \"{}\" in {}; skipped this group'\n warnings.warn(warn_msg.format(os.path.basename(file_path), key))\n\n return datasets", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)", "def pandas_convert(self):\n data = {}\n\n for names in self.data[0]:\n col_values = []\n\n if names in objects:\n for items in self.data[0][names]:\n col_values = []\n\n col_name = names + \"_\" + items\n\n for i in range(len(self.data)):\n col_values.append(self.data[i][names][items])\n\n data[col_name] = col_values\n else:\n for i in range(len(self.data)):\n col_values.append(self.data[i][names])\n \n data[names] = col_values\n\n self.pandas_df = pd.DataFrame(data=data)\n self.__clean_df()\n\n return self.pandas_df", "def load_up_initial_db(self, date_dict):\n df_tot = []\n for chunk in pd.read_sql_table(self.table, self.disk_engine, chunksize=10000, parse_dates=date_dict):\n df_tot.append(chunk)\n self.df = pd.concat(df_tot)", "def load(self, filenameprefix=None):\r\n if not filenameprefix:\r\n filenameprefix = self.name_prefix\r\n dat = self # historical\r\n # dat.xrecent = _fileToMatrix(filenameprefix + 'xrecentbest.dat')\r\n dat.xmean = _fileToMatrix(filenameprefix + 'xmean.dat')\r\n dat.std = _fileToMatrix(filenameprefix + 'stddev' + '.dat')\r\n # a hack to later write something into the last entry\r\n for key in ['xmean', 'std']: # 'xrecent',\r\n dat.__dict__[key].append(dat.__dict__[key][-1]) # copy last row to later fill in annotation position for display\r\n dat.__dict__[key] = array(dat.__dict__[key], copy=False)\r\n dat.f = array(_fileToMatrix(filenameprefix + 'fit.dat'))\r\n dat.D = array(_fileToMatrix(filenameprefix + 'axlen' + '.dat'))\r\n return dat", "def fill_dataset(self):\n rm, rstd = self.get_rolling_stats()\n\n self.add_rolling_mean(rm)\n 
self.add_bollinger_bands(rstd)\n self.add_spy_info()\n self.add_beta_and_sharpe()\n self.add_stlouis_data()", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def _get_data(self):\n project_name, experiment_id = self.parent._get_parent_identifiers()\n\n self._data = self.repository.get_dataframe_data(\n project_name, self.id, experiment_id=experiment_id\n )", "def manage_data(filename=\"rookies_stats.csv\"):\n dirty_rookies = pd.read_csv(os.path.join(\"data\", filename))\n cleaned_rookies = clean_data(dirty_rookies)\n cleaned_rookies.to_csv(os.path.join(\"data\", f\"cleaned_{filename}\"))\n\n drop_for_join = [\"rk\", \"player\", \"age\", \"mp\", \"fg_pct\", \"threes_pct\", \"ft_pct\"]\n dirty_rookies.drop(drop_for_join, axis=1, inplace=True)\n rookie_data = dirty_rookies.join(cleaned_rookies, on=dirty_rookies.index)\n data_to_database(rookie_data)", "def refresh_accuracy_df(self):\n acc_arr = []\n \n # load in current df if it exists\n\n # walk dir looking for saved net stats\n net_dir = os.path.join(self.data_dir, f\"nets\")\n for root, _, files in os.walk(net_dir):\n \n # only interested in locations files are saved\n if len(files) <= 0:\n continue\n \n slugs = root.split(\"/\")\n\n # exclude some dirs...\n if any(self.exclude_slug in slug for slug in slugs):\n continue\n \n # consider all files...\n for filename in files:\n\n # ...as long as they are perf_stats\n if not \"perf_stats\" in filename:\n continue\n \n filepath = os.path.join(root, filename)\n stats_dict = np.load(filepath, allow_pickle=True).item()\n \n dataset = stats_dict.get(\"dataset\") if stats_dict.get(\"dataset\") is not None else \"imagenette2\"\n net_name = stats_dict.get(\"net_name\")\n train_scheme = stats_dict.get(\"train_scheme\") if stats_dict.get(\"train_scheme\") is not None else \"sgd\"\n group = stats_dict.get(\"group\")\n case = stats_dict.get(\"case\")\n sample = stats_dict.get(\"sample\")\n\n perf_stats = np.array([s for s in stats_dict.get(\"perf_stats\") if s is not None])\n for epoch in range(len(perf_stats)):\n try:\n (val_acc, val_loss, train_acc, train_loss) = perf_stats[epoch]\n except TypeError:\n print(f\"Entry in perf_stats did not match expectations. Dataset: {dataset}; Scheme: {train_scheme}; Case {case}; Sample: {sample}; Epoch: {epoch}\")\n continue\n acc_arr.append([dataset, net_name, train_scheme, group, case, sample, epoch, val_acc, val_loss, train_acc])\n \n # make dataframe\n acc_df = pd.DataFrame(acc_arr, columns=self.net_idx_cols+[\"val_acc\", \"val_loss\", \"train_acc\"])\n \n # save df\n self.save_df(\"acc_df.csv\", acc_df)", "def loadData(self):\n\n for info in os.walk(settings.BEHAVIOR_PATH):\n path = info[0]\n\n # Get the files, if there are any\n for element in info[2]:\n split = element.split(\".\")\n\n # If there's only one '.' 
in the filename, then we know it's not a .old.h5 file, or a file without an extension.\n if(len(split) == 2):\n name, extension = element.split(\".\")\n\n if(self.log):\n logging.debug(\"Name: \" + name + \" Extension: \" + extension)\n\n for animal in self.subjects:\n\n # Get the date from the name and format it in ISO format to compare to the current date.\n experimentDate = name.split(\"_\")[-1]\n isoDate = experimentDate[:4] + \"-\" + experimentDate[4:6] + \"-\" + experimentDate[6:8]\n\n if(self.log):\n logging.debug(\"Comparing date: \" + str(isoDate) + \" to \" + str(self.date) + \" (today)\")\n\n # We only want data from today from an animal that we care about\n if(self.date == extrafuncs.parse_isodate(isoDate) and extension == \"h5\" and animal in name):\n try:\n full_path = os.path.join(path, element)\n self.behavData.append((full_path, loadbehavior.BehaviorData(full_path, readmode='full')))\n if(self.log):\n logging.info(\"Successfully loaded data from: \" + full_path)\n except:\n self.sendToAllSubscribers(\"Error when attempting to load \" + full_path + \".\", \"Alert: Alarm error\")\n if(self.log):\n logging.error(\"Could not load \" + full_path + \".\")", "def __init__(self, *args, **kwargs):\r\n \"\"\"ALso metada are stored and can be use later.\"\"\"\r\n \"\"\"Number of rows is also cut from all blank lines\"\"\"\r\n self.file = pd.read_csv(args[0], header=1, delimiter=\",\")\r\n rng = [2 * n for n in range(1, len(self.file.columns) // 2)]\r\n self.dfs = np.split(self.file, rng, axis=1)\r\n self.meta_data = []\r\n for n in range(len(self.dfs)):\r\n self.dfs[n] = self.dfs[n].rename(columns={'Wavelength (nm).' + str(n): 'Wavelength (nm)', 'Abs.' + str(n): 'Abs'})\r\n pom = self.dfs[n]\r\n index = np.where(pd.isnull(pom))\r\n index = np.where(pd.isnull(pom))\r\n index = index[0][0]\r\n pom_cela = pom.iloc[:index]\r\n self.meta_data.append(pom.iloc[index:])\r\n self.dfs[n] = pd.DataFrame(pom_cela)", "def data(dataset=\"bio_eventrelated_100hz\"):\n # TODO: one could further improve this function with like\n # selectors 'ecg=True, eda=True, restingstate=True' that would\n # find the most appropriate dataset\n\n dataset = dataset.lower()\n\n # TODO: change this path back to \"master\"\n path = \"https://raw.githubusercontent.com/neuropsychology/NeuroKit/dev/data/\"\n\n # Signals as vectors =======================\n if dataset in [\"eeg\", \"eeg_150hz\", \"eeg.txt\"]:\n return pd.read_csv(path + \"eeg.txt\").values[:, 0]\n\n if dataset in [\"rsp\", \"rsp_1000hz\", \"rsp_1000hz.txt\"]:\n return pd.read_csv(path + \"rsp_1000hz.txt\", header=None).values[:, 0]\n\n if dataset in [\"ecg\", \"ecg_1000hz\", \"ecg_1000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"ecg_3000hz\", \"ecg_3000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"eog\", \"veog\", \"eog_100hz\", \"eog_100hz.csv\"]:\n return pd.read_csv(path + \"eog_100hz.csv\")[\"vEOG\"].values\n\n # Dataframes ===============================\n if dataset == \"iris\":\n info = sklearn_datasets.load_iris()\n data = pd.DataFrame(\n info.data, columns=[\"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\", \"Petal.Width\"]\n )\n data[\"Species\"] = info.target_names[info.target]\n return data\n\n if dataset in [\"eogs\", \"eogs_200hz\", \"eog_200hz\", \"eog_200hz.csv\"]:\n return pd.read_csv(path + \"eog_200hz.csv\")\n\n # Add extension\n if dataset in [\"bio_resting_8min_200hz\"]:\n dataset += \".json\"\n\n # Specific case for json file\n if 
dataset.endswith(\".json\"):\n if \"https\" not in dataset:\n data = pd.read_json(path + dataset, orient=\"index\")\n else:\n data = pd.read_json(dataset, orient=\"index\")\n df = {}\n for participant, row in data.iterrows():\n for _, data_string in row.items():\n data_list = json.loads(data_string)\n data_pd = pd.DataFrame(data_list)\n df[participant] = data_pd\n\n return df\n\n # TODO: Add more EEG (fif and edf datasets)\n if dataset in [\"eeg_1min_200hz\"]:\n\n return pickle.load(\n urllib.request.urlopen(\n \"https://github.com/neuropsychology/NeuroKit/blob/dev/data/eeg_1min_200hz.pickle?raw=true\"\n )\n )\n\n # General case\n file, ext = os.path.splitext(dataset) # pylint: disable=unused-variable\n if ext == \"\":\n df = pd.read_csv(path + dataset + \".csv\")\n else:\n if \"https\" not in dataset:\n df = pd.read_csv(path + dataset)\n else:\n df = pd.read_csv(dataset)\n return df", "def load(self):\n\n super(DatasetLoader_XRite2016, self).sync()\n\n keys = (\n 'ColorChecker24 - After November 2014',\n 'ColorChecker24 - Before November 2014',\n 'ColorCheckerSG - After November 2014',\n 'ColorCheckerSG - Before November 2014',\n )\n filenames = (\n 'ColorChecker24_After_Nov2014.txt',\n 'ColorChecker24_Before_Nov2014.txt',\n 'ColorCheckerSG_After_Nov2014.txt',\n 'ColorCheckerSG_Before_Nov2014.txt',\n )\n\n # TODO: Implement support for \"CGATS\" file format in \"Colour\":\n # https://github.com/colour-science/colour/issues/354\n illuminant = (\n CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['ICC D50'])\n\n self._content = OrderedDict()\n for key, filename in zip(keys, filenames):\n directory = os.path.splitext(filename)[0]\n path = os.path.join(self.record.repository, 'dataset', directory,\n filename)\n\n with codecs.open(path, encoding='utf-8') as xrite_file:\n samples = []\n is_data = False\n lines = filter(\n None, (line.strip() for line in xrite_file.readlines()))\n for line in lines:\n if line == 'END_DATA':\n is_data = False\n\n if is_data:\n tokens = line.split()\n samples.append([\n tokens[0],\n [\n float(value.replace(',', '.'))\n for value in tokens[1:]\n ],\n ])\n\n if line == 'BEGIN_DATA':\n is_data = True\n\n i, j = (6, 4) if len(samples) == 24 else (14, 10)\n samples = np.array(samples)\n samples = np.transpose(samples.reshape([i, j, 2]), [1, 0, 2])\n keys, values = zip(*samples.reshape([-1, 2]))\n values = XYZ_to_xyY(Lab_to_XYZ(values, illuminant))\n self._content[key] = ColourChecker(key,\n OrderedDict(zip(keys, values)),\n illuminant)\n\n return self._content", "def load_metadata(self):\n self.meta[\"user_tables\"] = pd.read_sql(self.SQL[\"User Tables\"], self.engine)\n self.meta[\"all_tables\"] = pd.read_sql(self.SQL[\"All Tables\"], self.engine)\n self.meta[\"all_databases\"] = pd.read_sql(self.SQL[\"All Databases\"], self.engine)", "def getDataForLoadComparisons(self):\n\n\t\t# Variables\n\t\tload_data = self.getLoadData() \n\t\tvalues = [] \n\t\tinner_dict = {}\n\t\touter_dict = {}\n\t\tfinal_data = []\n\t\tyesterday = self.helper.getYesterday()\n\t\tkey = self.helper.getYear() + self.helper.getMonth() + self.helper.getDay() + \"-loadData\"\n\t\tdata = load_data[yesterday[0]][int(yesterday[1])][int(yesterday[2])]\n\t\tdates = (['12:00 AM','1:00 AM','2:00 AM','3:00 AM','4:00 AM','5:00 AM',\n\t\t\t'6:00 AM','7:00 AM','8:00 AM','9:00 AM','10:00 AM','11:00 AM',\n\t\t\t'12:00 PM','1:00 PM','2:00 PM','3:00 PM','4:00 PM','5:00 PM',\n\t\t\t'6:00 PM','7:00 PM','8:00 PM','9:00 PM','10:00 PM','11:00 PM'])\n\n\t\t# Populating values array\n\t\tfor i in 
range(0,len(data)):\n\t\t\tinner_dict['label'] = dates[i]\n\t\t\tinner_dict['value'] = data[i]\n\t\t\tvalues.append(inner_dict)\n\t\t\tinner_dict = {}\n\n\t\t# Populating the final_data array and returning it\n\t\touter_dict['key'] = key\n\t\touter_dict['values'] = values\n\t\tfinal_data.append(outer_dict)\n\n\t\treturn final_data", "def dump_data(self):\n attr_names = [field for field in self.unique_together if field != 'parent']\n save_ndarrays_to_hdf5(\n self.data_path,\n [getattr(self, data_field) for data_field in self.data_fields],\n [self._get_dataset_path(field) for field in self.data_fields],\n attr_names,\n [getattr(self, attr_name) for attr_name in attr_names],\n )", "def load_data(objects):\n\n data = {}\n for obj in objects:\n if isinstance(obj, (str, pathlib.Path)):\n file = pathlib.Path(obj)\n logmsg(f'Loading file {file}')\n if file.suffix == '.json':\n data[file] = json.load(open(file))\n elif file.is_dir():\n mixtures = {}\n for ff in file.glob('*.json'):\n mixtures[ff.stem] = json.load(open(str(ff)))\n data[file] = mixtures\n else:\n data[file] = pd.read_csv(file, index_col=0, sep=',', low_memory=False,\n usecols=lambda col: not col.endswith('_ssq'))\n logmsg(f'Loaded file {file}')\n else:\n data[id(obj)] = obj\n return data", "def get_data(self):\r\n\r\n # Find the absolute path for the root dir (04-Decision-Science)\r\n # Uses __file__ as absolute path anchor\r\n root_dir = os.path.abspath('')\r\n\r\n # Use os library for Unix vs. Widowns robustness\r\n xls_path = os.path.join(root_dir, 'data')\r\n\r\n file_names = [f for f in os.listdir(csv_path) if f.endswith('.xls')]\r\n\r\n def key_from_file_name(f):\r\n if f[-4:] == '.xls':\r\n return f[:-4]\r\n\r\n # Create the dictionary\r\n data = {}\r\n for f in file_names:\r\n data[key_from_file_name(f)] = pd.read_excel(os.path.join(xls_path, f))", "def DataLoader():\n #importing data\n House_Prices_Uncleaned = pd.read_csv(\"zillow_data/Zip_zhvi_uc_sfrcondo_tier_0.33_0.67_sm_sa_mon.csv\")\n #Cleaning house prices data\n\n House_Prices=pd.DataFrame(House_Prices_Uncleaned[\"RegionName\"][House_Prices_Uncleaned[\"CountyName\"]==\"New York County\"])\n\n House_Prices[\"Price\"]=pd.DataFrame(House_Prices_Uncleaned[\"2020-09-30\"])\n\n House_Rent_Uncleaned= pd.read_csv(\"zillow_data/Zip_ZORI_AllHomesPlusMultifamily_SSA.csv\")\n\n #Cleaning house rent data\n House_Rent=pd.DataFrame(House_Rent_Uncleaned[\"RegionName\"])\n House_Rent[\"Rent\"]=pd.DataFrame(House_Rent_Uncleaned[\"2020-09\"])\n\n return House_Prices, House_Rent", "def all_data(self):\n return pd.concat([self.historic_data, self.dayahead_data])", "def datasets(self):\n pass", "def __load_company_data(self):\n\n for ticker_type, ticker_list in self.tickers.items():\n # yfinance only has sector, industry and country for stocks\n if ticker_type == \"STOCK\":\n for ticker in ticker_list:\n # Only gets fields for tickers with missing data\n # TODO: Should only get field missing for tickers with missing data\n # now it's taking the 4 of them\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"] from isin/ticker\n info_list = get_info_from_ticker(ticker)\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list\n\n elif ticker_type == \"CRYPTO\":\n for ticker in 
ticker_list:\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"]\n info_list = [\"Crypto\", \"Crypto\", \"Crypto\", \"Crypto\"]\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list\n\n else:\n for ticker in ticker_list:\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"]\n info_list = [\"-\", \"-\", \"-\", \"-\"]\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list", "def Export_in_files(COVID_data, COVID_data_reconstructed):\r\n F_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')\r\n FR_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')\r\n \r\n COVID_data_lists = [COVID_data, COVID_data_reconstructed]\r\n Data_file_list = [F_data_file, FR_data_file]\r\n Countries_list = list(COVID_data.keys())[1:]\r\n \r\n for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file\r\n COVID_data_temp = COVID_data_lists[Data_set_inc]\r\n Data_file_temp = Data_file_list[Data_set_inc]\r\n \r\n Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\\n')\r\n \r\n for Country in Countries_list:\r\n COVID_data_single_country = COVID_data_temp[Country]\r\n \r\n Date_list = list(COVID_data[Country].keys())\r\n for Date in Date_list:\r\n COVID_data_single_country_single_date = COVID_data_single_country[Date]\r\n Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None\r\n \r\n Data_file_temp.write('%s;%s;' % (Country, Date))\r\n Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))\r\n Data_file_temp.write('\\n')\r\n \r\n Data_file_temp.close()", "def load_oat1_3_big(self):\n source_df = pd.read_csv('./datasets/metabolites/OAT1OAT3Big.csv')\n source_df['SLC'] = source_df['SLC'].astype('category').cat.codes\n\n to_drop = [0, 2, 3, 4, ]\n\n df = source_df.drop(source_df.columns[to_drop], axis=1)\n\n print('Loaded in data, null values found: ', end=' ')\n print(df[pd.isnull(df).any(axis=1)])\n\n label_index = 1 # this is from source\n print(\"Data shape: \", df.shape[0])\n\n X = np.array([np.array(df.iloc[x, :]) for x in range(df.shape[0])])\n Y = np.array(source_df.iloc[:, label_index])\n\n header = np.array(df.columns)\n\n if self.scale:\n feature_scaler = StandardScaler()\n X = feature_scaler.transform(X)\n\n return X, Y, header", "def read_data_files(self):\n\n for name, snap in zip(self.names, self.snaps):\n # build the very important dictionary:\n key = f'{name}_{snap:03}' # e.g 'MW_000'\n self.galaxies[key] = Galaxy(name, snap, self.path, \n self.usesql, self.ptype, self.stride)\n self.time = self.galaxies[key].time\n\n # bits of minor housekeeping:\n # 
self.path = self.galaxies[key].filepath # may speed up next search\n self.filenames.append(key)", "def load_data(self):\n\t\ti = 0\n\n\t\tpaths = glob.glob(self.file_path+'/rollout_*')\n\t\tself.rollouts = []\n\n\n\t\tfor path in paths:\n\t\t\tdata_point = np.load(path,encoding='latin1')\n\t\t\tself.rollouts.append(data_point)\n\n\t\treturn paths", "def load_values(self):\n # TODO: Add self.prefix and extension\n NetworkTables.loadEntries(self.file.get_filename(), prefix='/vision/' + self.name + '_')", "def load_vox(data: pd.DataFrame, in_place: bool = False):\n if not in_place:\n mod = data.copy()\n else:\n mod = data\n clips = []\n channels = []\n samplerates = []\n durations = []\n for i, row in mod.iterrows():\n sys.stdout.write(f\"\\r[-] Reading: {i} of {len(mod)} ({i / len(mod) * 100: .2f}%)\")\n sys.stdout.flush()\n with audioread.audio_open(row['file']) as f:\n channels.append(f.channels)\n samplerates.append(f.samplerate)\n durations.append(f.duration)\n data = bytearray()\n for buf in f:\n data = data + buf\n clips.append(data)\n sys.stdout.write(f\"\\r[ ] Read {len(mod)} files into DataFrame.\\r\\n\")\n sys.stdout.flush()\n mod['audio'] = pd.Series(clips)\n mod['channels'] = pd.Series(channels)\n mod['samplerate'] = pd.Series(samplerates)\n mod['duration'] = pd.Series(durations)\n return mod" ]
[ "0.6741529", "0.63294286", "0.619855", "0.60682344", "0.60343254", "0.5896149", "0.5864683", "0.5835448", "0.58048934", "0.5794775", "0.57485104", "0.574704", "0.5740164", "0.5707918", "0.5695425", "0.5673318", "0.5653236", "0.5649809", "0.56193334", "0.55798227", "0.5578606", "0.55723476", "0.55557495", "0.55381346", "0.5530428", "0.55265117", "0.55151325", "0.5505278", "0.550316", "0.55022466", "0.5500382", "0.5497901", "0.54947156", "0.5487048", "0.5484113", "0.5471775", "0.54689944", "0.5466092", "0.5464807", "0.5459699", "0.5455604", "0.5450553", "0.54494566", "0.54328716", "0.5419645", "0.5414712", "0.54114085", "0.540983", "0.5401669", "0.53979105", "0.53826374", "0.5357493", "0.5357458", "0.53564435", "0.5350917", "0.5347492", "0.5343092", "0.5341089", "0.5340421", "0.533899", "0.53327173", "0.53199494", "0.53114057", "0.53114057", "0.5311121", "0.53018874", "0.52929235", "0.529277", "0.5288739", "0.5287368", "0.52863294", "0.52861553", "0.52843827", "0.52812165", "0.527937", "0.5278781", "0.52762055", "0.5275012", "0.52729344", "0.5268662", "0.52631533", "0.5257692", "0.5254157", "0.5254078", "0.52526945", "0.52505565", "0.5246719", "0.52451116", "0.5244721", "0.5244651", "0.5244265", "0.52318454", "0.5230717", "0.52300185", "0.52290595", "0.5223917", "0.52192813", "0.52177924", "0.5216242", "0.5210887" ]
0.5710556
13
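
The row above ends here; its document writes several tables into a single netCDF file by appending them as separate groups with xarray. As a hedged illustration only — none of the names below come from the dataset row, and the output path, variables, and group name are assumptions — a minimal sketch of that append-by-group pattern:

```python
# Minimal sketch (not taken from the row above): appending xarray Datasets to one
# netCDF file as separate groups. Requires the h5netcdf backend to be installed.
import numpy as np
import pandas as pd
import xarray as xr

out_name = "merged_output.nc"  # hypothetical output path

# Stand-ins for a merged record index and a feedback table
record_index = xr.Dataset({"index": ("obs", np.arange(5))})
feedback = pd.DataFrame({"fg_depar": [0.1, -0.2, 0.05]}).to_xarray()

# The first write creates the file; later writes append further groups with mode="a"
record_index.to_netcdf(out_name, mode="w", engine="h5netcdf")
feedback.to_netcdf(out_name, mode="a", engine="h5netcdf", group="era5fb")
```
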
Initialize the linked list.
def __init__(self): self.head = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, lst=[]):\n self.__length = 0 # current length of the linked list\n self.__head = None # pointer to the first node in the list\n for e in lst: # initialize the list,\n self.add(e) # by adding elements one by one", "def __init__(self):\n node = ListNode(0) # dummy\n self.head = node\n self.tail = node\n self.len = 0", "def __init__(self, init):\n self.stepforward = int(init)\n self.data = Linkedlist()", "def __init__(self):\n self._head = self._Node(None, None, None)\n self._tail = self._Node(None, None, None)\n self._head._next = self._tail\n self._tail._prev = self._head\n self._size = 0", "def __init__(self, linked_list: object):\n self.current_node = linked_list._head", "def __init__(self):\n\n self.head = None\n self.node_count = 0", "def __init__(self):\n\t\tself._head = None\n\t\tself._tail = None\n\t\tself._size = 0", "def __init__(self, head: ListNode):\n self.l = []\n while head:\n self.l.append(head.val)\n head = head.next", "def __init__(self):\n self.head = ListNode()", "def __init__(self):\n self.size = 0\n self.head, self.tail = Node(0), Node(0)\n self.head.next = self.tail\n self.tail.prev = self.head", "def __init__(self):\n self.head = None\n self.length = 0", "def __init__(self):\n self.head = None\n self.length = 0", "def __init__(self):\n\t\tself.head = None\n\t\tself.tail = None", "def __init__(self):\n self.head = None\n self.tail = None\n self.current_node = None", "def __init__(self):\n self.head = None\n self.tail = self.head", "def __init__(self):\n try:\n self.head=None\n\n except Exception as error:\n print (f\"There is error in __init__ of LinkedList, the error {error}\")", "def __init__(self):\n\n self.__head = None", "def __init__(self):\n self._head = None\n self._tail = None\n self._size = 0", "def __init__(self):\n self._head = None\n self._tail = None\n self._size = 0", "def __init__(self):\n self._head = None\n self._tail = None\n self._size = 0", "def __init__(self):\n self.head = None\n self.tail = None", "def __init__(self):\n self.head = None\n self.tail = None", "def __init__(self, head: ListNode):\n self.nodes = []\n\n while(head):\n self.nodes.append(head)\n head = head.next", "def __init__(self):\n self.length = 0\n self.head = None", "def __init__(self):\n self.head = None\n self.size = 0", "def __init__(self):\n self.head = None\n self.size = 0", "def __init__(self):\n self.head = None\n self.size = 0", "def __init__(self):\n\n self.head = None", "def __init__(self):\n\n self.head = None", "def __init__(self):\n\n self.head = None", "def __init__(self):\r\n self._head = None\r\n self._tail = None\r\n self._size = 0", "def __init__(self):\n self._head = self._tail = None\n self._size = 0", "def __init__(self):\n\t\tself.current = None\n\t\tself.head = None", "def __init__(self):\n\n self.head = None\n self.tail = None\n self.size = 0", "def __init__(self, items):\r\n if len(items) == 0: # No items, and an empty list!\r\n self._first = None\r\n else:\r\n self._first = _Node(items[0])\r\n curr = self._first\r\n for item in items[1:]:\r\n curr.next = _Node(item)\r\n curr = curr.next", "def __init__(self):\n self.__head = None", "def __init__(self):\n self.__head = None", "def __init__(self, lst=[]):\r\n self.__length = 0 # current length of the linked list\r\n self.__head = None # pointer to the first node in the list\r\n self.__last = None # pointer to the last node in the list\r\n lst.reverse() # reverse to ensure elements will appear in same order\r\n for e in lst: # add elements of input list lst one by one\r\n 
self.add(e)", "def __init__(self, head: ListNode):\n self.head = head\n self.list = []\n while head:\n self.list.append(head.val)\n head = head.next", "def __init__(self):\n \n self.array = [LinkedListNode(None, None) for i in range(10000)]", "def __init__(self, head):\n self.head = head\n self.length = 0\n node = head\n while node:\n node = node.next\n self.length += 1", "def __init__(self):\n self.head = None\n self.tail = None\n self.count = 0", "def __init__(self):\n self.dummy = ListNode(-1)\n self.cnt = 0", "def __init__(self):\n self.head = None\n self.tail = None\n self.size = 0", "def __init__(self, items):\n if len(items) == 0:\n self._first = None\n self._rest = None\n else:\n self._first = items[0]\n self._rest = LinkedListRec(items[1:])", "def __init__(self) -> None: \n SortedList.__init__(self)\n self.head = None", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, l):\n self.l = l\n self.next = None\n self.prev = None\n self.prev_n = -1\n self.next_n = -1", "def __init__(self, strict=None):\n if strict is None:\n strict = False\n Node.strict = strict\n\n super(LinkedList, self).__init__()\n self.first_node = None", "def __init__(self):\r\n self.head = None", "def __init__(self):\n\n self.head = linkNode()\n self.tail = None\n # print(self.head.val)", "def __init__(self):\n self.size = 0\n self.head = Node(0)", "def __init__(self):\n self.head = Block()\n self.tail = Block()\n self.head.next = self.tail\n self.tail.prev = self.head\n self.mapping = {}", "def __init__(self, head: ListNode):\n self.head = head\n temp = head\n i = 0\n while temp is not None:\n i+=1\n temp = temp.next\n self.len = i # 找到list的长度", "def __init__(self, head=None):\n\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, head):\n self.head = head", "def __init__(self, data=None):\n if data is not None:\n self._size = 1\n self.head = Node(data)\n self.tail = self.head\n else:\n self._size = 0\n self.head = None\n self.tail = None", "def __init__(self):\n self.__list = None\n self.__length = 0", "def __init__(self, head=None):\r\n self.head = head", "def __init__(self, init_size=8):\n # Create a new list (used as fixed-size array) of empty linked lists\n self.buckets = [LinkedList() for _ in range(init_size)]", "def __init__(self):\n self.min_stack = []\n self.listHead = LNode(0, 0)", "def __init__(self, item):\r\n self.item = item\r\n self.next = None # Initially pointing to nothing\r", "def __init__(self, value, next=None):\n self.value = value # element at the node\n self.next = next # reference to next node in the LinkedList", "def __init__(self):\n self.head = PrefixNode('', False)", "def __init__(self, *args):\n this = _libsbml.new_ListOfInitialAssignments(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.head = None", "def __init__(self, iterable=None):\n # Initialize a new linked list to store the items\n # print(\"self __init__\", self)\n self.list = LinkedList()\n # self.top = self.list.head\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def __init__(self, items=None):\n self.head = None # First node\n self.tail = None # Last node\n # Append 
given items\n if items is not None:\n for item in items:\n self.append(item)", "def __init__(self):\n\t\tself.state = None\n\t\tself.info = None\n\t\tself.next = None", "def __init__(self):\n # 'count' is to store the length of chain that the 'singlyLinkedList' refers to, the number of values, not keys\n self.count = 0 # type: int\n # To store existing keys in the 'singlyLinkedList'\n self.keys = [] # type: List[keyType]\n # To deal with collision.\n self.singlyLinkedList = [] # type: List[ChainNode]", "def __init__(self, data=None):\n self.head = None\n self.tail = None\n if data is not None:\n try:\n for item in data:\n if item is data[0]:\n self.head = Node(item, next=None)\n self.tail = self.head\n else:\n self.head = Node(item, self.head)\n except TypeError:\n node = Node(data, next=None)\n self.head = node\n self.tail = self.head", "def __init__(self, data=None):\n self.data = data\n # initializing an empty node that has no next nor prior node\n self.next = self.prior = None", "def __init__(self):\n self.hash_table = {}\n self.count_table = {}\n self.head = CountListNode()\n self.tail = CountListNode()", "def __init__(self, iterable=None):\n self.list = LinkedList()\n\n if iterable:\n for item in iterable:\n self.enqueue(item)", "def __init__(self):\n self._head = None # reference to the head node\n self._size = 0 # number of stack elements", "def __init__(self, num_elements):\n\n # Create array of linked lists\n self.main_array = [LinkedList() for i in range(num_elements)]\n self.num_elements = num_elements", "def __init__(self):\n self.l = []", "def __init__(self):\n self.graph = LinkedList()", "def __init__(self, iterable=None):\n # Initialize a new linked list to store the items\n self.list = LinkedList()\n if iterable is not None:\n for item in iterable:\n self.push(item)", "def __init__(self, size=1):\n self._head = None\n self._tail = None\n self._size = 0", "def __init__(self):\n self.numDict = dict() # 用于存储key与双向链表中node(key,node)映射关系\n self.head = Node(\"\", -1) # 头节点 存储最小值\n self.tail = Node(\"\", -1) # 尾节点 存储最大值\n # 初始化双向链表\n self.tail.prev = self.head\n self.head.next = self.tail" ]
[ "0.793523", "0.7746552", "0.7618733", "0.76008236", "0.7564557", "0.7530009", "0.74541533", "0.74384236", "0.7436091", "0.74262166", "0.74253154", "0.74253154", "0.73937964", "0.7374561", "0.7347711", "0.7333443", "0.7332118", "0.7326369", "0.7326369", "0.7326369", "0.731081", "0.731081", "0.7294253", "0.7284983", "0.7278687", "0.7278687", "0.7278687", "0.72745514", "0.72745514", "0.72745514", "0.72707266", "0.72631246", "0.7256296", "0.72543746", "0.72534615", "0.72519386", "0.72519386", "0.72509634", "0.7237581", "0.7235863", "0.72327924", "0.72286206", "0.7221722", "0.7214259", "0.72138005", "0.720262", "0.71873903", "0.71873903", "0.71873903", "0.71873903", "0.71873903", "0.717279", "0.71011084", "0.70695245", "0.70539945", "0.704633", "0.70340437", "0.7003384", "0.6957721", "0.6933524", "0.6933524", "0.6933524", "0.6933524", "0.68965936", "0.68835974", "0.68596506", "0.6841702", "0.68385476", "0.68355453", "0.67910063", "0.67693144", "0.67611724", "0.67392206", "0.67330897", "0.6731233", "0.6721142", "0.67136055", "0.67119473", "0.66991115", "0.66984457", "0.668866", "0.6679864", "0.6676319", "0.6653953", "0.664952", "0.66414255", "0.6634318", "0.6623309" ]
0.7175879
63
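
The row above pairs its query with a one-line constructor (`self.head = None`). As an illustrative sketch only, assuming a simple `Node` helper class that the row's document does not show, a minimal singly linked list built around that constructor:

```python
# Illustrative sketch; the Node class is an assumption, not part of the row above.
class Node:
    def __init__(self, val, next=None):
        self.val = val    # element stored at this node
        self.next = next  # reference to the next node, or None


class LinkedList:
    def __init__(self):
        self.head = None  # empty list: no head node yet

    def __iter__(self):
        # Walk the chain from the head, yielding each stored value.
        node = self.head
        while node:
            yield node.val
            node = node.next
```
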
Add a node to the linked list.
def push(self, val): self.head = Node(val, self.head)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_node(self, node):\n self.nodes.append(node)", "def add_node(self, node):\n self.nodes.add(node)", "def addNode (self, node):\n self.__nodes.add(node)", "def add_node(self, node):\n self._nodes.add(node)", "def add_node(self, node):\n self._nodes.add(node)", "def addNode(self, node: Node):\n self.nodes.append(node)", "def AddNode(self, node):\n self.nodes.append(node)\n return node", "def add_node(self, node):", "def add(self, item):\n \n n = Node(item)\n n.set_next(self.head)\n self.head = n", "def add(self, item):\n node = Node(item)\n node.next = self.head\n self.head = node", "def add_node (self, node):\n raise NotImplementedError", "def _add_node(self, node: int) -> None:\r\n self.nodes.add(node)", "def add_node(self, node):\n if node not in self.nodes:\n self.nodes.append(node)", "def add_node(self, value):\n node = Node(value)\n\n if self.head is None:\n self.head = node\n self.tail = node\n\n if self.current_node is None:\n self.current_node = node\n\n self.current_node.next = node\n self.current_node = node", "def add_node(self) -> Node:\n new_node = Node(self.__next_id)\n self.__nodes[self.__next_id] = new_node\n self.__next_id += 1\n return new_node", "def add_node(self, node):\n if node not in self.nodes:\n self._nodes.append(node)", "def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True", "def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node", "def add_node(self, node: Node) -> None:\n assert len(\n self.network) <= 10, \"Too many nodes attempted to be placed in network\"\n self.network.append(node)", "def register_node(self, node):\n self.nodes.add(node)", "def append(self, node):\n if not isinstance(node, Node):\n # If the node parameter is not a Node then update it\n # to refer to one.\n node = Node(node)\n\n if self.first_node is None:\n # The first_node is None therefore we just set it.\n self.first_node = node\n else:\n # Find the last_node in the list and update it's next attribute.\n self.last_node.next = node", "def add_node(self, data):\n new_node = Node(data)\n if self.cur_node is not None:\n new_node.next, self.cur_node.next = self.cur_node.next, new_node\n self.cur_node = new_node\n self.length += 1\n self.cur_pos += 1\n if self.start_node is None:\n self.start_node = self.cur_node\n # print(\"Node({}) added to {}\".format(new_node.data, self.cur_pos-1))", "def add(self, value):\n # Find the tail\n tail = self.head\n while tail and tail.next:\n tail = tail.next\n\n if tail:\n # Add a new node with the value\n tail.next = Node(value, tail, None)\n else:\n # Add first node to the list\n self.head = Node(value, None, None)", "def add_node(self, new_node: 'GraphNode'):\n self.operator.add_node(new_node)", "def _add_node(self, node):\n node.prev = self.head\n node.next = self.head.next\n\n self.head.next.prev = node\n self.head.next = node", "def add_node(self, node):\n self._nodes[node.id] = node\n self._clear_cache()", "def add_node(self, node):\n\n node.number = len(self.nodes)\n node.id = len(self.nodes)\n\n if node.id not in self.nodes:\n self.nodes[node.id] = node\n\n return self", "def add_node(self, node):\n self.nodeset.add(node) # add the input node to the nodeset\n\n self.__check_validity() # check if graph is valid - throws exception if not", "def addNode(self, new_value): # Class O(n)\r\n if type(new_value) is not int: raise ValueError(\"Please, insert an integer\")\r\n h = self.head\r\n while 'next' in dir(h.next):\r\n h = h.next\r\n else:\r\n h.next = Node(new_value)", "def 
add_node(self, node):\n temp = self.head.post\n self.head.post = node\n node.pre = self.head\n node.post = temp\n temp.pre = node", "def add_node(self, element, node_id = None):\n if node_id is None:\n new_node_id = self._next_id\n self._next_id += 1\n else:\n new_node_id = node_id\n self._next_id = node_id + 1\n \n new_node = GraphIncidenceList.Node(new_node_id, element)\n self._nodes[new_node._id] = new_node\n self._inc[new_node._id] = LinkedList()", "def add_node(self, node):\n self.nodes.append(node)\n self.edges[node.identifier] = {}\n self._id2node[node.identifier] = node\n node.parent = None", "def push_node(self, node):\n n = node\n if self.empty():\n self.head = n\n return\n\n l = self.head\n while l.next is not None:\n l = l.next\n l.next = n\n return", "def append_node(self, node):\n self.nodes.append(node)\n node.slot = len(self.nodes)", "def add_node(self, node):\n self.nodes[node.id] = node\n\n self.layers = max(self.layers, node.layer + 1)", "def add_node(self, **kwargs):\n self._content.append(Node(**kwargs))", "def add_node(self, node):\n\n # Add node only if it does not exist yet\n if node.id() in self.__nodes:\n return\n\n labels = node.labels()\n for label in labels:\n break\n\n if label not in self.__labels:\n self.__labels[label] = len(self.__labels)\n\n js = \"nodes.push({index: \" + str(node.id()) + \", \" +\\\n \"name: \\\"\" + str(node.id()) + \"\\\", \" +\\\n \"group: \" + str(self.__labels[label]) + \\\n \" });\"\n\n d3_node_id = self.frame.evaluateJavaScript(js) - 1\n self.__nodes[node.id()] = str(d3_node_id)\n logger.info(\"node id %s - > d3 id: %s\", node.id(), d3_node_id)", "def add_node(self, name, node):\n self.nodes.setdefault(name, node)", "def addNode(self, val):\n\t\tnode = self.createNode(val)\n\t\tif self.head is None:\n\t\t\tself.head = node\n\t\t\treturn node\n\t\tcur = self.head\n\t\twhile cur.getNext() is not None:\n\t\t\tcur = cur.getNext()\n\t\tcur.setNext(node)\n\t\treturn node", "def append(self, item):\n new_node = Node(item)\n if self.tail is not None:\n self.tail.next = new_node\n if self.head is None:\n self.head = new_node\n self.tail = new_node", "def append(self, new_node):\n \n if self.head is None:\n self.head = new_node\n return None\n\n curr_node = self.head\n while curr_node.next is not None:\n curr_node = curr_node.next\n curr_node.next = new_node", "def add_node(self,node):\n \n vertex = Vertex(node)\n \n self.nodes[node] = vertex\n self.numNodes += 1", "def addNode(l: ListNode, v: int) -> ListNode:\n node = ListNode(v)\n l.next = node\n return l", "def append(self, item):\n \n n = Node(item)\n current = self.head\n \n # Special case - empty list\n if current is None:\n self.head = n\n else:\n # Find the last node\n while current.get_next() is not None:\n current = current.get_next()\n current.set_next(n)", "def add_node(self, state, other):\n\t\tnew_node = Node()\n\t\tnew_node.state = state\n\t\tnew_node.info = other\n\n\t\tif self.head == None:\n\t\t\tself.current = new_node\n\t\t\tself.head = new_node\n\t\telse:\n\t\t\tself.current.next = new_node\n\t\t\tself.current = self.current.next", "def append_node(self, new_data):\n\n #create a new node and put new data.\n new_node = Node(new_data)\n\n if self.head is None:\n self.head = new_node\n return\n\n end = self.head\n while end.next:\n end = end.next\n\n end.next = new_node", "def add_node(self, node_data):\n self.__rtags.append(True)\n self.__nodedata.append(data)\n self.__ltags.append(True)", "def addNode(self, new_data):\r\n curr = self.head\r\n\r\n # Add new Node\r\n if 
curr is None:\r\n n = Node(new_data) \r\n self.head = n\r\n return\r\n \r\n # Sort Nodes \r\n if curr.data > new_data:\r\n n = Node(new_data) \r\n n.next = curr\r\n self.head = n\r\n return\r\n\r\n while curr.next is not None:\r\n if curr.next.data > new_data:\r\n break\r\n curr = curr.next\r\n n = Node(new_data) \r\n n.next = curr.next\r\n curr.next = n\r\n return", "def add_node(self, node):\r\n self.undeclared_nodes.append(node)", "def addNode(self, nodeItem):\n assert isinstance(nodeItem, NodeItem)\n self.addItem(nodeItem)", "def add_node(self, node):\n if node in self.edges:\n raise ValueError('Duplicate node')\n else:\n self.edges[node]=[]\n self.nodes.add(node)", "def add(self, item):\n newNode = Node(item, None)\n if self.isEmpty():\n self._front = newNode\n else:\n self._rear.next = newNode\n self._rear = newNode\n self._size += 1", "def append(self, value):\n new_node = Node(value)\n if self.head:\n node = self.head\n while node.next != self.head:\n node = node.next\n node.next = new_node\n else:\n self.head = new_node\n new_node.next = self.head", "def append(self, data):\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n return\n\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n last_node.next = new_node", "def append(self, data):\n new_node = Node(data)\n\n if self.head is None:\n self.head = new_node\n return\n\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n last_node.next = new_node", "def addAtHead(self, val):\n node = ListNode(val)\n if self.head == None:\n self.head = node\n else:\n node.next = self.head\n self.head = node", "def append(self, data):\n new_node = Node(data)\n\n if not self.head:\n self.head = new_node\n return\n\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n\n last_node.next = new_node", "def add_node(self, node):\n index = self._node_index.setdefault(node.ntype, dict())\n if node.ext_id not in index:\n index.setdefault(node.ext_id, node)\n self._type_list.setdefault(node.ntype, list()).append(node)", "def add(self, item):\n \n previous = None\n current = self.head\n \n while current is not None:\n if current.get_data() > item:\n break\n else:\n previous = current\n current = current.get_next()\n \n n = Node(item)\n # If node is to be added at the beginning (incl. 
case of empty list)\n if previous is None:\n n.set_next(self.head)\n self.head = n\n else:\n previous.set_next(n)\n n.set_next(current)", "def add_to_head(self, value):\n\n new_node = ListNode(value)\n if self.size == 0:\n self.head = new_node\n self.tail = new_node\n\n else:\n new_node.next = self.head\n self.head.prev = new_node\n new_node.next = self.head\n self.head = new_node\n\n # increments the size attribute after adding node to list\n self.size += 1", "def append(self, data):\n node = Node(data)\n if not self.head:\n self.head = node\n return\n temp = self.head\n while temp.next:\n temp = temp.next\n temp.next = node", "def add(self, data):\n node = Node(data)\n if self.head == None:\n self.head = node\n\n else:\n traverse = self.head\n if self.head.data > node.data:\n self.head = node\n node.next = traverse\n\n if self.head.data < node.data:\n temp = self.head\n while traverse.next != None:\n if traverse.data < node.data:\n temp = traverse\n traverse = traverse.next\n\n if traverse.data < node.data:\n temp = traverse\n\n temp1 = temp.next\n temp.next = node\n node.next = temp1", "def add(self, data, add_pos=None):\n node = Element(data)\n if add_pos is None:\n add_pos = self.__length\n if self.__list is None:\n self.__list = node\n else:\n if add_pos == 0:\n node.set_next(self.__list)\n self.__list = node\n else:\n current = self.__list\n current_pos = 1\n while current.get_next() is not None and current_pos < add_pos:\n current = current.get_next()\n current_pos += 1\n node.set_next(current.get_next())\n current.set_next(node)\n self.__length += 1", "def addNode(self):\n\t\tself.head.insert(self.size, len(self.succ))\n\t\tself.size += 1", "def append(self, data):\n if self.head is None: # checking a corner case of linked list being empty\n self.head = ListNode(data)\n else: # a normal traversal and append to the end of the tail node\n temp_node = self.head\n new_node = ListNode(data)\n while temp_node.next is not None:\n temp_node = temp_node.next\n temp_node.next = new_node", "def append(self, value):\n if self.head is None:\n self.head = Node(value)\n return\n node = self.head\n while node.next:\n node = node.next\n node.next = Node(value)", "def add_node(self, node: Node) -> None:\n\t\t# Check for conflicts with current nodes; iterate over nodes\n\t\tfor index in range(len(self.nodes)):\n\t\t\t# Exit if comparison fails. 
Node can update itself from the compare() method\n\t\t\tif not self.nodes[index].compare(node):\n\t\t\t\treturn\n\n\t\t# Add the Node if no conflicts\n\t\tself.nodes.append(node)", "def add(self, node, arrow = None):\n## print(node)\n self.graph = addNode(self.graph, node, arrow)", "def append(self, data):\n if not self.head:\n self.head = DListNode(data=data)\n return\n curr = self.head\n while curr.next:\n curr = curr.next\n curr.next = DListNode(data=data, prev=curr)", "def addNode( self, n, **attr ):\n self._G.add_node(n, attr)", "def add_node(self, element, node_id = None):\n if node_id is None:\n new_node_id = self._next_id\n self._next_id += 1\n else:\n new_node_id = node_id\n self._next_id = node_id + 1\n \n new_node = GraphIncidenceList.Node(new_node_id, element)\n self._nodes[new_node._id] = new_node\n self._inc[new_node._id] = Set()", "def add(self, node, name=None):\r\n\r\n name = name or self._generate_node_name()\r\n\r\n if name in self.nodes:\r\n raise KeyError(\"Node with name %s already exists\" % name)\r\n\r\n self.nodes[name] = node\r\n\r\n return name", "def add_to_head(self, value):\n node = Node(value)\n if self.head is not None:\n node.set_next(self.head)\n\n self.head = node", "def append(self, data):\n\n node = Node(data)\n\n if self.head == None:\n\n self.head = node\n\n else:\n\n traverse = self.head\n\n while traverse.next != None:\n traverse = traverse.next\n\n traverse.next = node", "def append(self, value):\n node = Node(value)\n if self._head is None:\n self._head = node\n else:\n current = self._head\n while current.next:\n current = current.next\n current.next = node\n self._size += 1", "def add_first(self, node_to_add):\n node_to_add.next = self.head\n self.head = node_to_add", "def append(self, data):\n if self.head is None:\n self.head = ListNode(data, None)\n else:\n itr = self.head\n while itr:\n if itr.next is None:\n itr.next = ListNode(data, None)\n return\n itr = itr.next", "def append(self, value):\r\n\r\n if self.head is None:\r\n self.head = Node(value)\r\n return\r\n node = self.head\r\n while node.next:\r\n node = node.next\r\n node.next = Node(value)", "def append(self, data):\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.size += 1", "def add_node(self, node, weight=1):\n self._nodes.add(node)\n self._weights[node] = weight\n self._rebuild_circle()", "def append(self, data):\r\n new_node = Node(data)\r\n current_node = self.head\r\n while current_node.next!=None:\r\n current_node = current_node.next\r\n current_node.next = new_node #when we are at the last node, set it's pointer to point at the new Node\r", "def add_child(self, node):\n self.children.append(node)", "def add_child(self, node):\n self.children.append(node)", "def add_child(self, node):\n self.children.append(node)", "def add_last(self, node_to_add):\n if self.head == None:\n self.head = node_to_add\n return\n node = self.head\n # while node.next is not None:*\n while node.next is not None:\n node = node.next\n node.next = node_to_add", "def add_neighbor(self, node):\n self.neighbors.append(node)", "def add_node(self, metadata, pos):\n node = Node(metadata, pos)\n self.addItem(node)\n self.nodes[node.id] = node\n return node", "def addAtHead(self, val):\n new_node = ListNode(val)\n new_node.next = self.head\n self.head = new_node\n self.length += 1", "def add_node(self, node_name: str, external_id: Optional[str] = None) -> None:\r\n 
self._nodes.add_node_by_name(node_name, external_id)", "def append(self, item: Any) -> None:\n new_node = _Node(item)\n\n if self._first is None:\n self._first = new_node\n else:\n curr = self._first\n while curr.next is not None:\n curr = curr.next\n\n curr.next = new_node\n self._length += 1", "def addAtHead(self, val):\n node = ListNode(val)\n node.next = self.head.next\n self.head.next = node\n if self.head is self.tail:\n self.tail = node\n self.len += 1", "def add_node(self, address):\n self._nodes.append(util.IPv4Address(address))", "def push(self, new_node):\n \n if self.head is None:\n self.head = new_node\n return None\n \n new_node.next = self.head\n self.head = new_node", "def addNode(self, appendIt=False, nodeId=None, childId=None, sublist=None, label=''):\n node = DoubleLinkList.Node(nodeId=nodeId, childId=childId, sublist=sublist, label=label)\n if not self.head:\n # Empty list, add as the first entity\n self.head = self.tail = self.cursor = node\n else:\n if appendIt:\n node.pref = self.tail\n node.pref.nref = node\n self.tail = self.cursor = node\n else:\n nodeAfter = self.cursor\n nodeBefore = self.cursor.pref\n node.nref = nodeAfter\n nodeAfter.pref = node\n if self.atHead():\n self.head = self.cursor = node\n else:\n node.pref = nodeBefore\n nodeBefore.nref = node\n self.cursor = node\n return node", "def append(self, data):\n\n new_node = Node(data)\n if self.head is None:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n self.tail = new_node\n self.size += 1", "def addAtHead(self, val):\n new_node = Node(val)\n new_node.next = self.head\n self.head = new_node\n self.length += 1", "def do_add_node(self, line=''):\n self.fibbing.add_node()", "def append(self, data):\n new_node = SingleNode(data)\n\n if self.head is None:\n self.head = new_node\n return\n last_node = self.head\n while last_node.next:\n last_node = last_node.next\n last_node.next = new_node", "def add_node(self, properties=None, **kwargs):\r\n\t\tif properties is None:\r\n\t\t\tproperties={}\r\n\t\t# may change method sig of Node since we can always combine arguments\r\n\t\t# here\r\n\t\tnode = Node(self._nextid, properties, **kwargs)\r\n\t\tself._nodes[self._nextid] = node\r\n\t\tself._nextid += 1\r\n\t\treturn node", "def add_node(self, node: str, **kwargs: Any) -> None:\n if \"data\" in kwargs:\n data = kwargs[\"data\"]\n else:\n data = kwargs\n self.graph.add_node(node, **data)", "def append(self, value):\n current = self.head\n\n while current:\n if current.next == None:\n current.next = Node(value)\n break\n current = current.next" ]
[ "0.8397831", "0.83893156", "0.82865614", "0.8239832", "0.8239832", "0.81677186", "0.8124044", "0.80292726", "0.8016808", "0.79811966", "0.7921723", "0.7920711", "0.7804295", "0.7771042", "0.7770391", "0.7748963", "0.7730202", "0.7698047", "0.7654877", "0.7544322", "0.7534985", "0.7499565", "0.7499552", "0.74962974", "0.7489393", "0.7463247", "0.74490154", "0.7407426", "0.7395585", "0.7369295", "0.7342063", "0.7334213", "0.7283768", "0.727195", "0.7241142", "0.7239309", "0.7233101", "0.7232056", "0.72071725", "0.7200376", "0.7181759", "0.7175654", "0.7159385", "0.71551585", "0.71491104", "0.714381", "0.7129318", "0.7114426", "0.7103498", "0.7093034", "0.70813394", "0.70796955", "0.7079326", "0.7067139", "0.7067139", "0.70563227", "0.7053338", "0.7040583", "0.7039898", "0.7033809", "0.70305496", "0.70276105", "0.7019509", "0.7012379", "0.7002066", "0.69958514", "0.6994939", "0.69934845", "0.69932115", "0.69885206", "0.69803244", "0.69752866", "0.6965668", "0.6958094", "0.6953089", "0.6952", "0.69360787", "0.6932921", "0.69229764", "0.69216", "0.6921265", "0.69202834", "0.69202834", "0.69202834", "0.6913614", "0.69127977", "0.6900276", "0.6896496", "0.6891356", "0.6874056", "0.68692833", "0.68676734", "0.6862358", "0.68608356", "0.68540496", "0.6851554", "0.6844349", "0.6839322", "0.6833228", "0.68239486", "0.68224186" ]
0.0
-1
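
The document in the row above adds at the head (`self.head = Node(val, self.head)`), which is O(1), while several of its negatives instead walk to the tail before linking the new node. A hedged sketch of both variants, reusing the assumed `Node(val, next)` helper from the previous example:

```python
# Illustrative sketch; Node(val, next) is the same assumed helper as above.
class LinkedList:
    def __init__(self):
        self.head = None

    def push(self, val):
        # Head insertion: O(1); this is what the row's document does.
        self.head = Node(val, self.head)

    def append(self, val):
        # Tail insertion: O(n); the pattern several of the negatives use instead.
        new_node = Node(val)
        if self.head is None:
            self.head = new_node
            return
        node = self.head
        while node.next:
            node = node.next
        node.next = new_node
```
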
Pop a value off of the end of the list.
def pop(self): if not self.head: raise IndexError('Cannot pop from empty linked list.') popped_value = self.head.val self.head = self.head.next return popped_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop(self): ##################### <-\n value = self.lst[-1]\n self.lst = self.lst[:-1]\n return value", "def pop(self):\n\n value = self.values[0]\n if len(self.values) == 1:\n self.values = []\n else:\n self.populate(self.values[1:])\n return value", "def pop(self):\n popped = self.__list[-1]\n self.__list = self.__list[:-1]\n return popped", "def heap_pop(self, value):\n if value is None or self.get_size() == 0:\n return\n\n if self.find(value) is not None:\n # end of list\n position = self.find(value)\n last = self.get_size() - 1\n\n # pop element and percolate down\n self.swap(position, last)\n self.table.pop()\n self.percolate_down(position)\n return", "def Pop(self):\n # Alternativly use built-in pop()\n #return self.list.pop()\n top = self.list[len(self.list) - 1]\n self.list.remove(top)\n return top", "def pop(self):\n size = self._list.size()\n if size == 0:\n return None\n data = self._list.tail.data\n self._list.removeIndex(size-1)\n return data", "def pop(self):\n value = self.head.value\n try:\n if self.count > 1:\n self.head = self.head.previous\n self.head.next = None\n else:\n self.tail = None\n self.head = None\n self.count -= 1\n except AttributeError:\n AttributeError(u\"DLL is empty.\")\n return value", "def pop(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val", "def pop_last(self):\n self.pop_item(-1)", "def pop():", "def pop(self): ##################### <-\n value = self.top.value\n self.top = self.top.next\n return value", "def pop(self): ##################### <-\n value = self.top.value\n self.top = self.top.next\n return value", "def pop(self): ##################### <-\n value = self.top.value\n self.top = self.top.next\n return value", "def pop(self):\n current = self.head\n new_head = current.next\n self.head = new_head\n return current.val", "def pop(self):\n if not self.value:\n return\n s = []\n while len(self.value) > 1:\n s.append(self.value.pop())\n peek = self.value.pop()\n while s:\n self.value.append(s.pop())\n return peek", "def pop(self):\r\n return self.list.pop()", "def pop(self):\n b = self.a[-1]\n del self.a[-1]\n return b", "def pop(self):\n if self.end is None:\n return None\n elif self.end == self.begin:\n element = self.begin\n self.begin = self.end = None\n return element.value\n else:\n element = self.begin\n while element.next != self.end:\n element = element.next\n temp = self.end\n self.end = element\n element.next = None\n return temp.value", "def pop(self):\n self.data[0], self.data[-1] = self.data[-1], self.data[0]\n result = self.data.pop()\n self.heapify_down(0)\n return result", "def pop(self):", "def pop(self):", "def pop(self):\n value = self.buffer[self.end - 1]\n self.buffer[self.end - 1] = None\n self.end = (self.end - 1) % len(self.buffer)\n return value", "def pop(self):\n if not self.head:\n raise IndexError(\"Empty list, unable to pop\")\n output = self.head.data\n self.head = self.head.next\n self._counter -= 1\n return output", "def pop(self):\n if self.__size == 0:\n return None\n else:\n data = self.__head.get_data()\n self.__head = self.__head.get_next()\n self.__size -= 1\n return data", "def dequeue(self): ##################### <-\n value = self.lst[0]\n self.lst = self.lst[1:]\n return value", "def pop(self, index=-1):\n # type: (int) -> Any\n return self.value(list.pop(self, index))", "def pop(self):\n self.list.pop()", "def pop(self):\n pass", "def pop(self):\n pass", "def pop(self):\n #print(self.list_x[0])\n #return 
self.list_x[0]\n #self.list_x.remove(self.list_x[0])\n pop = self.list_x[0]\n self.list_x = self.list_x[1:]\n return pop", "def pop(self):\n return self.list.pop()", "def pop(self):\r\n if self.head is None:\r\n return\r\n output = self.head.value\r\n self.head = self.head.next\r\n return output", "def pop(self) -> object:\n if len(self) <= 0:\n raise EmptyListException(\"The list is empty.\")\n\n pop_node = self._head\n self._head = self._head.next()\n self._len -= 1\n\n return pop_node.value()", "def popitem(self):\n pass", "def pop(self): # 06:30 Lecture Week 2 \"Stacks\" (16:24)\n if self.isEmpty():\n raise Exception(\"Stack underflow\")\n item = self.first.Item # save item to return\n self.first = self.first.Next # delete last Node added\n self.N -= 1\n return item # return the saved item", "def pop(self):\n self._raise_if_empty()\n item = self._top.data\n self._top = self._top.next\n return item", "def pop_back(self):\n if (self._size == 0):\n return None\n\n # Edge case, list has only one element\n # Behave same as pop_front()\n if (self._size == 1):\n return self.pop_front()\n\n output_value = self._tail.value\n\n self._tail = self._tail.prev\n self._tail.next = None\n self._size -= 1\n\n return output_value", "def pop(self):\n raise NotImplementedError", "def pop(self):\n\n if not self.empty:\n i = self._begin\n\n self._begin = (self._begin + 1) % self._capacity\n self._size -= 1\n\n return (self[i])\n else:\n raise ValueError", "def popitem(self):\n return self.pop(0)", "def pop_item(self):\r\n node = self.head\r\n popped_value = int(0)\r\n item_count = self.count_items()\r\n print ('start popping ops, item count = ' + str(item_count))\r\n\r\n if item_count == 0:\r\n print('\\nbut there is none in the list')\r\n return None\r\n\r\n elif item_count == 1:\r\n print('POPPING_ITEM: there is only ONE item left in the list!')\r\n popped_value = node.get_value()\r\n self.head = None\r\n\r\n elif item_count == 2:\r\n print('POPPING_ITEM: there is only TWO item left in the list!')\r\n node_tail = node.get_next()\r\n node_head = node\r\n node = node.get_next()\r\n popped_value = node.get_value()\r\n node_head.set_next(None)\r\n\r\n elif item_count > 2:\r\n for i in range(item_count - 2):\r\n node_2nd_last = node.get_next()\r\n node = node.get_next()\r\n\r\n node = node.get_next()\r\n popped_value = node.get_value() # retrieve value from\r\n node_2nd_last.set_next(None) # del\r\n print ('popped value = ' + str(popped_value))\r\n return popped_value", "def del_value(self):\n return self.list.pop()", "def dequeue(self):\n if self.is_empty():\n raise ValueError('stack is empty')\n else:\n val = self.list.head.data\n self.list.delete(val)\n return val", "def pop(self):\n item = self.stack[-1]\n self.stack = self.stack[:-1]\n return item", "def popitem(self): # real signature unknown; restored from __doc__\n pass", "def pop(self):\n if self.head is None:\n return None\n else:\n data = self.head._data\n self.head = self.head._next\n self.count -= 1\n return data", "def pop(self):\n if self.is_empty():\n raise Empty('Stack is empty!')\n last = (self._front + self._size - 1) % len(self._data)\n rlt = self._data[last]\n self._data[last] = None # help gc\n self._size -= 1\n # resize the underlying list\n if self._size < len(self._data) // 4:\n self._resize(len(self._data) // 2)\n return rlt", "def pop_back(self):\n if self.head is None:\n raise IndexError('pop_back to empty list')\n node = self.tail \n if node.prev_node is None:\n self.head = None\n else:\n node.prev_node.next_node = None\n self.tail 
= node.prev_node\n return node.value", "def pop(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Stack is empty')\n\t\tanswer = self._head._element\n\t\tself._head = self._head._next \n\t\tself._size -= 1\n\t\treturn answer", "def pop(self) -> int:\n self._aux()\n ret = self.q1.popleft()\n self.q1, self.q2 = self.q2, self.q1\n self.size -= 1\n return ret", "def pop(self, index=-1):\n print(\"before\", self._list)\n popped_value = self._list[index]\n\n copy = self._list\n print(\"copy\", copy)\n\n self._list = np.empty([4], np.int16)\n print(\"new self.list\", self._list)\n\n if index == -1:\n new_array = np.empty([4], np.int16)\n print(\"new array\", new_array)\n for index in range(self._size - 1):\n new_array[index] = self._list[index]\n index += 1\n self._list = new_array\n self._size = index\n print(self._list)\n return popped_value\n\n while index < self._size:\n self._list[index] = self._list[index + 1]\n index += 1\n print(\"after \",self._list)\n return popped_value", "def pop(self):\n if self.head is None:\n raise IndexError(\"Cannot pop from an empty linked list.\")\n first = self.head.val\n self.head = self.head.next\n self._length -= 1\n return first", "def pop(self):\n if self.is_empty():\n raise ValueError\n\n item = self.linked_list.head\n self.linked_list.head = item.next\n\n item.next = None\n\n self.linked_list.node_count -= 1\n\n return item.data", "def pop(self):\n temp = self.tail.prev\n self.tail.prev = None\n self.tail = temp\n self.tail.next = None\n self.len -= 1", "def pop(self, pos: int = -1):\n idx = pos if pos >= 0 else len(self) + pos\n if not 0 <= idx < len(self): raise KeyError('Index out of bound!')\n\n p = self.head\n while idx > 0: # stop before pos\n p = p.next\n idx -= 1\n\n # be careful when pop the last element we must change the self.tail\n if p.next is self.tail:\n self.tail = p\n\n # be careful that pop from an empty Linkedlist\n if p.next is not None:\n res = p.next.data\n p.next = p.next.next\n self._size -= 1\n else:\n res = None\n\n return res", "def pop(self):\n if self.n == 0:\n raise ValueError(\"Heap is empty\")\n value = self.ar[0]\n self.n -= 1\n self.ar[0] = self.ar[self.n]\n self.heapify(0)\n return value", "def pop(self):\r\n\r\n if not self.is_empty():\r\n\r\n half_cap = self._capacity // 2\r\n item = self._data[self._size-1]\r\n self._data[self._size-1] = 0\r\n self._size -= 1\r\n\r\n if self._size <= half_cap:\r\n if half_cap != 0:\r\n\r\n self.shrink()\r\n\r\n return item\r\n\r\n else:\r\n pass", "def pop(self, index):\r\n if index < 0 or index >= self.size():\r\n raise IndexError(\"Array index out of bounds\")\r\n itemToReturn = self._items[index]\r\n # Shift items up by one position\r\n for i in range(index, self.size() - 1):\r\n self._items[i] = self._items[i + 1]\r\n # Reset empty slot to fill value\r\n self._items[self.size() - 1] = self._fillValue\r\n self._logicalSize -= 1\r\n if self.size() <= len(self) // 4 and len(self) > self._capacity:\r\n self.shrink()\r\n return itemToReturn", "def pop(self):\n try:\n res = self._linkedlist.pop()\n self._update_attr()\n return res\n except IndexError:\n raise IndexError(\"Cannot pop from empty stack.\")", "def pop(self, index: int) -> Any:\n # If empty raise indexerror\n if self.is_empty():\n raise IndexError\n # Pop at the beginning of the list.\n elif index == 0:\n item = self._first\n # modify self._first\n self._first = self._rest._first\n self._rest = self._rest._rest\n return item\n # Recursive case\n else:\n if not self._rest:\n raise IndexError\n return self._rest.pop(index 
- 1)", "def pop(self):\n if self.items:\n return self.items.pop()\n return None", "def pop(self):\n\n traverse = self.head\n\n if self.head == None:\n return -1\n\n if self.head.next == None:\n self.head = None\n\n return traverse.data\n\n while traverse.next is not None:\n\n t1 = traverse.next\n if t1.next is None:\n traverse.next = None\n\n return t1.data\n traverse = traverse.next", "def pop(self):\n pass", "def pop(self):\n if self.isEmpty():\n raise KeyError(\"The queue is empty.\")\n oldItem = self._front.data\n self._front = self._front.next\n if self._front is None:\n self._rear = None\n self._size -= 1\n return oldItem", "def pop(self) -> int:\r\n return self.items.pop(0)", "def pop(self):\n return self.popleft()", "def pop(self):\n\n if self.top is None:\n raise IndexError(\"pop from empty stack\")\n return_node = self.top\n self.top = self.top.next\n return_node.next = None\n self._len -= 1\n return return_node.value", "def pop(self) -> T:\n pass", "def pop(self) -> int:\n if self.empty():\n raise RuntimeError(\"Queue is empty!\")\n result = self.data[self.head]\n self.data[self.head] = None\n self.head = (self.head + 1) % len(self.data)\n self.size -= 1\n if 0 < self.size < len(self.data) // 4 and len(self.data) > 10:\n self.resize(len(self.data) // 2)\n return result", "def pop(self) -> int:\n tmp_list = ArrayStack(10)\n res = None\n for i in range(self.data.get_size()):\n if self.data.get_size() == 1:\n res = self.data.pop()\n else:\n tmp_list.push(self.data.pop())\n self.data = ArrayStack(10)\n for i in range(tmp_list.get_size()):\n self.data.push(tmp_list.pop())\n return res", "def pop(self):\n while len(self.values) > 1:\n self.temp.append(self.values.pop())\n popped = self.values.pop()\n while self.temp:\n self.push(self.temp.pop())\n return popped", "def pop(self):\n array = self.array\n item = array[0] \n if len(array) == 1:\n del array[0]\n else:\n compare = self.compare\n del self.pos[array[0]] \n array[0] = array.pop()\n self.pos[array[0]] = 0\n low, high = 0, 1\n while high < len(array):\n if ((high+1 < len(array)\n and compare(array[high], array[high+1]) > 0)):\n high = high+1\n if compare(array[low], array[high]) <= 0:\n break\n self.pos[array[high]] = low\n self.pos[array[low]] = high \n array[low], array[high] = array[high], array[low]\n low, high = high, 2*high+1\n return item", "def pop(self):\n return self.ll.delete_first()", "def pop(self):\n\t\treturn self.items.pop()", "def pop(self):\n return self.List_store.pop()", "def pop(self):\n if self.begin == None:\n return None\n\n elif self.begin == self.end:\n node = self.begin\n self.end = self.begin = None\n\n return node.value\n\n else:\n node = self.begin\n while node.next != self.end:\n node = node.next\n self.end = node\n return node.next.value", "def popitem(self):\n pass", "def pop_back(self):\n if self.n==0:\n print(\"Error; empty list\")\n return\n else:\n temp_node = self.head\n # until temp_node is final-1 node\n while temp_node.next.next is not None:\n temp_node = temp_node.next\n temp = temp_node.next\n temp_node.next = None # reset the tail node\n self.n -= 1 # decrement\n return temp.val", "def pop(self):\n\n if self.items:\n return self.items.pop()\n\n return None", "def pop(self, index=None, last=True):\n if index == None:\n return super().pop(last)\n else:\n ret = self[index]\n self.remove(ret)\n return ret", "def popitem(self):\r\n while 1:\r\n key, value = self.data.popitem()\r\n o = key()\r\n if o is not None:\r\n return o, value", "def pop(self):\n return self.remove(0)", "def 
pop(self):\n if self.is_empty():\n raise RuntimeError(\"Attempt to pop the empty stack!\")\n item = self.top()\n self._items = self._items[:-1]\n return item", "def pop(self):\n\n traverse = self.head\n\n while traverse.next is not None:\n\n t1 = traverse.next\n if t1.next is None:\n traverse.next = None\n return t1.data\n traverse = traverse.next", "def pop(self):\n removed_node = self.top\n self.top = self.top._next\n self._size -= 1\n return removed_node.val", "def pop(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n up_next = self.priority_queue[nextkey][0]\n self.priority_queue[nextkey] = self.priority_queue[nextkey][1:]\n return up_next\n else:\n raise IndexError(\"There's nothing in your queue\")", "def pop(self):\n\n assert self.size > 0, \"Cannot pop item! The MaxHeap is empty!\"\n ret = self.items[0]\n self.items[0] = self.items[self.size - 1]\n self.items[self.size - 1] = None\n self.size -= 1\n self._shift_down(0)\n return ret", "def pop(self):\n if self._pushed:\n rv = self._pushed.pop()\n else:\n rv = self._generator.next()\n self.last = rv\n return rv", "def pop(self) -> int:\n if len(self.a) != 0:\n while len(self.a) != 1:\n self.topvalue = self.a.popleft()\n self.b.append(self.topvalue)\n\n return self.a.popleft()\n else:\n while len(self.b) != 1:\n self.topvalue = self.b.popleft()\n self.a.append(self.topvalue)\n return self.b.popleft()", "def pop(self, i=None):\n if i is None:\n i = len(self) - 1\n val = self[i]\n del self[i]\n return val", "def pop(self):\r\n it = iter(self)\r\n try:\r\n value = next(it)\r\n except StopIteration:\r\n raise KeyError\r\n self.discard(value)\r\n return value", "def pop(self):\n try:\n return self._values.pop()\n except IndexError:\n raise IndexError('Cannot pop from empty deque.')", "def pop(self):\n if self.is_empty():\n raise Empty('Stack is empty')\n result = self._head._element\n self._head = self._head._next # bypass the former top node\n self._size -= 1\n return result", "def pop(self):\n self._a_to_b()\n r = self.b[-1]\n self.b.pop()\n return r", "def pop(self):\n\n if not self._list:\n raise StackEmptyError()\n\n return self._list.pop()", "def peek(self):\n s = []\n while self.value:\n s.append(self.value.pop())\n p = s[-1]\n while s:\n self.value.append(s.pop())\n return p", "def popitem(self):\r\n while True:\r\n key, value = self.data.popitem()\r\n o = key()\r\n if o is not None:\r\n return o, value", "def pop_first(self):\n self.pop_item(0)", "def pop(self) -> int:\n return self.q.popleft()", "def pop_front(self):\n if (self._size == 0):\n return None\n\n output_value = self._head.value\n\n self._head = self._head.next\n self._head.prev = None\n self._size -= 1\n\n # Edge case, list is now empty\n if (self._size == 0):\n self._tail = None\n\n return output_value" ]
[ "0.84853077", "0.79199415", "0.7871684", "0.77542204", "0.7659789", "0.76190156", "0.7606379", "0.7602611", "0.75503474", "0.75462514", "0.7535589", "0.7535589", "0.7535589", "0.75355107", "0.74785006", "0.74628246", "0.7460463", "0.744531", "0.7444407", "0.7429238", "0.7429238", "0.74142146", "0.7414094", "0.73960835", "0.7391787", "0.7362344", "0.73528117", "0.73431766", "0.73431766", "0.73390275", "0.7328126", "0.72645444", "0.7258892", "0.7254624", "0.7235399", "0.7233758", "0.7217659", "0.71866256", "0.71552914", "0.7150006", "0.7149212", "0.71361005", "0.71306384", "0.7111076", "0.710508", "0.71038544", "0.70881253", "0.7068247", "0.704304", "0.70354104", "0.70343673", "0.7028653", "0.70234627", "0.7007564", "0.6987378", "0.6986992", "0.6980232", "0.69737816", "0.6933798", "0.6926067", "0.69256204", "0.6924694", "0.6924211", "0.6920113", "0.6914973", "0.69132817", "0.69106376", "0.68983775", "0.68954563", "0.68933225", "0.68910706", "0.6885375", "0.6882921", "0.6858562", "0.6857438", "0.68556964", "0.6851737", "0.6849673", "0.684857", "0.6843588", "0.683487", "0.6834466", "0.6826733", "0.68244886", "0.68214357", "0.68194085", "0.68191636", "0.68164504", "0.68111193", "0.68108135", "0.6805844", "0.6805261", "0.6798868", "0.67983896", "0.6796517", "0.67943734", "0.6791648", "0.6791564", "0.6790357", "0.6773792" ]
0.7111202
43
Search through the linked list.
def search(self, val):
 if not self.head:
 raise IndexError('Cannot search empty list.')
 current_node = self.head
 while current_node:
 if current_node.val == val:
 return current_node
 current_node = current_node.next
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search(self, ele):\n if self.head:\n current = self.head\n while True:\n\tif current.data == ele:\n\t return True\n\tcurrent = current.next\n\tif current == self.head:\n\t break\n return False", "def search(self, data):\r\n if self.head is None:\r\n pass\r\n current = self.head\r\n found = False\r\n while not found and current:\r\n pass\r\n if not found:\r\n print(\"Data not found.\")\r\n return current", "def search(self, val):\n search_through = self.head\n while search_through:\n if val == search_through.data:\n return search_through\n else:\n search_through = search_through.next\n return search_through", "def search(self, data):\n\n current = self.head\n found = False\n while current and found is False:\n if current.data == data:\n print(f'Data {data} was found')\n found = True\n else:\n current = current.next\n if current is None:\n raise ValueError(\"Data not in list\")\n return current", "def search(self, item):\n \n current = self.head\n \n while current is not None:\n if current.get_data() == item:\n return True\n else:\n current = current.get_next()\n \n return False", "def search(self, val):\n current = self.head\n # import pdb; pdb.set_trace()\n while current is not None:\n if current.data == val:\n return current\n current = current.next_node\n return None", "def search(self, item):\n \"\"\"\n :type item: Node()\n :rtype Boolean\n \"\"\"\n curr = self.head\n while curr:\n if curr.getData() == item:\n return True\n curr = curr.getNext()\n return False", "def search(self, x):\n temp = self.head\n while temp:\n if temp.data == x:\n return True\n temp = temp.next\n return False", "def search(self, x):\n temp = self.head\n\n for j in range(0,i):\n if temp.next==None:\n return\n if (x == temp.value):\n print(\"Found\")\n return \n temp = temp.next\n\n print(\"Not Found\")", "def search_item(self, data):\n\n traverse = self.head\n while traverse.next != None:\n\n if traverse.data == data:\n return True\n traverse = traverse.next\n if traverse.data == data:\n return True\n else:\n return False", "def search_item(self, data):\n\n traverse = self.head\n if self.head == None:\n return False\n\n while traverse.next != None:\n\n if traverse.data == data:\n return True\n traverse = traverse.next\n if traverse.data == data:\n return True\n else:\n return False", "def search(self, element):\n current = self.head\n while current and current.data != element:\n current = current.next\n return current", "def search(self, val):\n current = self.head\n found = False\n while current and not found:\n if current.val == val:\n found = True\n return current\n current = current.next\n return None", "def search(self, val):\n search = self.head\n while search:\n if search.val == val:\n return search\n search = search.next\n return None", "def find(self, item):\n current = self.head\n while current.next != None:\n if current.data == item:\n return current\n current = current.next", "def search_recursive(self, llist, key):\n if not llist:\n return False\n if llist.data == key:\n return True\n return self.search_recursive(llist.next, key)", "def search(self, key):\n\n current = self.head\n\n while current:\n if current.data == key:\n return current\n else:\n current = current.next_node\n\n return None", "def search(self, value):\n return self._search(self.head, value)", "def search_list(lst, key):\n while lst and lst.data != key:\n lst = lst.next\n return lst # if list is null or key not found, None is returned", "def test_linked_list_search_success(new_ll):\n from linked_list import Linked_List\n 
result = new_ll.search('pear')\n assert result.value == 'pear'", "def search(self, item):\n \n current = self.head\n \n while current is not None:\n if current.get_data() == item:\n return True\n # Early stop by taking advantage of ordering \n elif current.get_data() > item:\n return False\n else:\n current = current.get_next()\n \n return False", "def look_up(self, val):\n index = 0\n if self.head is None:\n print(\"List is empty\")\n start = self.head\n while start is not None:\n if start.data == val:\n return index\n start = start.next\n index += 1\n return \"No such element\"", "def search(self, value):\r\n node = self.head\r\n while node:\r\n if node.value == value:\r\n return node\r\n node = node.next\r\n raise ValueError('Value not found')", "def find(self, key):\n curr = self.head\n while curr and curr.data != key:\n curr = curr.next\n return curr # Will be None if not found", "def find(self, args):\n curpl = self.ui.leftwin.highlighted().data\n if not args:\n if not self.find_list:\n self.err_print('At least one argument required')\n return\n else:\n term = args[0]\n if len(args) == 1:\n key = curpl.sort_key\n elif len(args) > 1:\n key = args[1]\n if key not in song.tags:\n self.err_print('Invalid key: ' + key)\n return\n\n self.find_list = (ii for ii, item in enumerate(curpl.data) if item[key] == term)\n\n try:\n ind = next(self.find_list)\n except StopIteration:\n self.err_print('Not found.')\n return\n\n self.ui.jump_to_ind(ind, len(curpl.data), self.ui.rightwin)\n\n self.ui.switch_view_right()", "def search():\n pass", "def _search(cls, node, value):\n if node is None:\n return False\n\n if node.value == value:\n return True\n\n return cls._search(node.next_, value)", "def find(self, find_state):\n\t\tpointer = self.head\n\t\tif pointer.state == find_state:\n\t\t\treturn pointer.state\n\t\telse:\n\t\t\twhile pointer.next != None:\n\t\t\t\tpointer = pointer.next\n\t\t\t\tif pointer.state == find_state:\n\t\t\t\t\treturn pointer.state\n\t\tprint \"Sorry, your item was not found\"", "def search(self, search):\n raise NotImplementedError", "def search_node(self, data):\n\t\tif self.root is None:\n\t\t\traise EmptyRootException(\"ERROR: No node available in list. Please insert node in list.\")\n\t\tcurrent_node = self.root\n\t\twhile current_node is not None:\n\t\t\tif current_node.data == data:\n\t\t\t\tprint \"Node with data %s has been found in list\" % data\n\t\t\t\treturn\n\t\t\tcurrent_node = current_node.next\n\t\tprint \"Node with data %s not found in list\" % data", "def linear_search(vlist, srchval): # somewhat different from book\n#Look at each item in list. If it equals the value you are looking for, stop.\n # linear_search_2.py\n index = 0\n for item in vlist:\n if item == srchval:\n return index # implicit break\n index += 1\n \n return -1", "def search(self, word: str) -> bool:\n node = self.head\n for c in word:\n if c not in node.next:\n return False\n node = node.next[c]\n return node.valid", "def search(self, find_val):\n return False", "def search(self, target):\n if DEBUG: print('search({})'.format(target))\n\n result = False\n\n cur = self.head\n \n output = \"\\tPath: \"\n \n while cur:\n output += \"{}\".format(cur.val)\n if not cur.next and not cur.below:\n output += \" END\"\n break\n elif cur.next == None or\\\n target < cur.next.val:\n cur = cur.below\n output += \" v \"\n elif cur.next.val == target:\n result = True\n output += \" -> {}! 
FOUND\".format(target)\n break\n elif target > cur.next.val:\n output += \" -> \"\n cur = cur.next\n else:\n print(\"\\thow did i get here\")\n\n if DEBUG: print(output)\n if DEBUG: print('\\t{}'.format(result))\n return result", "def contains(self, key: str) -> SLNode:\n cur = self.head\n while cur is not None:\n if cur.key == key:\n return cur\n cur = cur.next\n return cur", "def contains(self, key: str) -> SLNode:\n cur = self.head\n while cur is not None:\n if cur.key == key:\n return cur\n cur = cur.next\n return cur", "def contains(self, key):\n if self.head is not None:\n cur = self.head\n while cur is not None:\n if cur.key == key:\n return cur\n cur = cur.next\n return None", "def contains(self, key):\n if self.head is not None:\n cur = self.head\n while cur is not None:\n if cur.key == key:\n return cur\n cur = cur.next\n return None", "def search(self, item):\n current = self._head\n # search until we find it or fall off the end\n while current != None:\n if current.getData() == item:\n # item has been found\n return True\n else:\n if current.getData() > item:\n # We’ve passed where the item could be.\n # Only works for ordered lists.\n return False\n else:\n current = current.getNext()\n return False", "def find(self, key):\n if self.head is None:\n return\n itr = self.head\n while itr:\n if itr.data == key:\n return itr.data\n itr = itr.next\n return None", "def __contains__(self, item):\n cur_node = self.head\n while cur_node is not None:\n if item in cur_node.data_list:\n return True\n else:\n cur_node = cur_node.next_node\n\n return False", "def find(self, p):\n pass", "def contains(self, key):\r\n\t\tif self.head is not None:\r\n\t\t\tcur = self.head\r\n\t\t\twhile cur is not None:\r\n\t\t\t\tif cur.key == key:\r\n\t\t\t\t\treturn cur\r\n\t\t\t\tcur = cur.next\r\n\t\treturn None", "def find(self, key):\n curr_node = self.head\n\n while curr_node is not None: # a normal traversal and checking first match\n if curr_node.data == key:\n return curr_node\n curr_node = curr_node.next\n\n return None", "def contains(self, d):\n\n temp = self.head\n while temp is not None:\n if temp.data == d:\n return True\n else:\n temp = temp.next\n\n return False", "def includes(self,value):\n try:\n current = self.head\n while current.next != None:\n if current.value == value:\n return True\n else:\n current = current.next\n return False\n except Exception as error:\n print (f\"There is error in __init__ of LinkedList, the error {error}\")", "def search(self, data):\n index = self.hash_function(data)\n return self.objects_list[index].search_item(data)", "def find(self, data):\n index = 0\n current = self.head\n while current:\n if current.data == data:\n return index\n index += 1\n current = current.next\n\n return -1", "def test_linked_list_search_failure(new_ll):\n from linked_list import Linked_List\n result = new_ll.search('owt')\n assert result is None", "def includes(self,data):\n curent=self.head\n while curent :\n\n\n if curent.data==data:\n curent= curent.next\n return True\n\n else:return False", "def search(self, value):\n pass", "def find_an_item_in_list(self, level):\n for element in self.list:\n element.find_an_item(element, level)", "def linear_search(L, key):\r\n for element in L:\r\n if element == key:\r\n return True\r\n return False", "def find(self, item):\n cur = self.sentinel.next\n while cur is not self.sentinel:\n if cur.item == item:\n return cur\n cur = cur.next\n return None", "def search_keyword(self,keyword):\n for entry in self.available_fields_list:\n for x in 
entry:\n if keyword in x:\n print(entry)\n break\n return", "def search(self, term):", "def search(self, key):\n return self.find_iterative(self.root,key)", "def continue_search( self ):\n return True;", "def sequential_search(a_list, item):\n pos = 0\n found = False\n\n while pos < len(a_list) and not found:\n if a_list[pos] == item:\n found = True\n else:\n pos += 1\n return found", "def find(ss, list_seq):\n\tfor item in list_seq:\n\t\tif item in ss:\n\t\t\treturn True\n\treturn False", "def search_key(self, key:str) -> bool:\n \n hash_key = self.hash_key(key)\n head = self.array[hash_key]\n \n while head.next:\n if head.next.key == key:\n return True\n head = head.next\n \n return False", "def search_next(self):\n self._raise_not_supported()", "def search(self, word):", "def preorder_search(self, start, find_val):\n return False", "def find_by_exact_match(self):\n while True: \n self.task_name_search = input(\"What is the keyword/s you are looking\"\n \" for? Press Q to quit to the main screen: \").strip()\n if self.task_name_search.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n x = self.dict_list\n return x\n self.find_by_exact_match_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(self.task_name_search, value):\n self.find_by_exact_match_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_exact_match_list)\n break\n self.del_or_edit()", "def linear_search(data, target):\n \n for i in range(len(data)):\n if data[i] == target:\n print(f\"Found {target} at index {i}\")\n return True\n print(f\"Item {target} not found\")\n return False", "def search(self, e):\n return e in self.table[hash(e) % len(self.table)]", "def find(self):\n raise NotImplementedError", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def search(self, key):\n if key in self.key_list:\n return (self.nodes)[key]\n return None", "def search(self, q):\n if len(q) > self.n: #checks to see if the length of q is larger than n\n raise Exception(\"q cannot be larger than n\") #raises an exception if it is\n return mybinsearch(self.sortedList, q, self.ststr) >= 0 # returns True if q is found in the list and False if it's not", "def __findLinkedUp(self, node):\n item=self.first\n while item and item.getNext():\n if item.getNext().value() == node.value(): return item\n item = item.getNext()\n return None", "def __contains__(self, item: Any) -> bool:\n curr = self._first\n\n while curr is not None:\n if curr.item == item:\n return True\n\n curr = curr.next\n\n return False", "def search(self, word: str) -> bool:\n node = self\n for c in word:\n node = node.d.get(c)\n if not node:\n return False\n return node.end", "def _sequential_search(list_of_items, search_item):\n # boolean variable to indicate whether the search item is found\n # indicator variable is initialize to false\n found = False\n # running index\n index = 0\n # while we have not run out of list items and item is not found\n while index < len(list_of_items) and not found:\n # if the search item is found then set indicator variable to True\n if list_of_items[index] == search_item:\n found = True\n # else increment the running index\n else:\n index = index+1\n # return the indicator variable\n return found", "def linearSearch(list, num):\n found = False\n for x in list:\n if x == int(num):\n found = True\n break\n if found:\n print('match found')\n if not found:\n print('no match found')", "def search(self, *args, **kwargs):", 
"def find(function, seq):\r\n for item in seq:\r\n if function(item): \r\n return item\r\n return None", "def find(func, list_seq):\n for list_item in list_seq:\n if func(list_item):\n return list_item", "def search(self, word):\n curNode = self.root\n for c in word:\n if not c in curNode:\n return False\n curNode = curNode[c]\n \n # Doesn't end here\n if not self.end in curNode:\n return False\n \n return True", "def has_at_index(self, index):\n count = 0\n start = self.head\n while start:\n if count==index:\n return start\n start = start.getLink()\n count+=1\n return None", "def search(self):\r\n #get the initial state\r\n initialState = State()\r\n \r\n #create root node\r\n rootNode = Node(initialState)\r\n \r\n #show the search tree explored so far\r\n treeplot = TreePlot()\r\n treeplot.generateDiagram(rootNode, rootNode)\r\n \r\n #perform search from root node\r\n self.performBacktrackSearch(rootNode, rootNode)\r\n \r\n rootNode.printTree()", "def linear_search_iterative(alist, target):\n index_target = None\n found = False\n index_current = 0\n while index_current < len(alist) and found is False:\n if alist[index_current] == target:\n index_target = index_current\n found = True\n index_current += 1\n return index_target", "def includes(self, value):\n current = self.head\n\n while current is not None:\n if current.value == value:\n return True\n current = current.next\n return False", "def includes(self, value):\n current = self.head\n while current:\n if current.value == value:\n return True\n else:\n current = current.next\n return False", "def search(self, query):", "def search(self, word):\n now = self.tree\n for i in word:\n if i in now:\n now = now[i]\n else:\n return False\n return True if 'end' in now else False", "def search(self, find_val):\n return self.preorder_search(self.root, find_val)", "def search(self, find_val):\n return self.preorder_search(self.root, find_val)", "def linear_search(key, my_list):\n key = word.upper()\n my_list = dictionary_list\n if key in my_list:\n if not key:\n print(word)", "def search(self, val):\n currentNode = self.rootNode\n while True:\n if currentNode is None:\n print(\"Number not found.\")\n return None\n elif currentNode.val == val:\n print(\"Number found.\")\n return currentNode\n elif currentNode.val < val:\n currentNode = currentNode.right\n else:\n currentNode = currentNode.left", "def linear_search(lst, value):\n i = 0\n while i != len(lst) and lst[i] != value:\n i = i + 1\n if i == len(lst):\n return -1\n else:\n return i", "def _key_list_search(self, keys_list, lookup_dict):\n for index, key in enumerate(keys_list):\n result = nested_lookup(key, lookup_dict)\n try:\n value = nested_lookup(keys_list[index + 1], result)\n except IndexError:\n pass\n return value", "def search(self, word):\n current = self.root\n for i in word:\n if current.hash_map.get(i) is None:\n return False\n current = current.hash_map.get(i)\n if current.num != 0:\n return True\n return False", "def linear_search(list, target):\n for i in range (0, len(list)):\n if list[i] == target:\n return i\n\n\n return None", "def runSearch():\n\tglobal processLanguageOn\n\tdataToFind=getDataFromWidget(podSearchEntry)\n\t#Search through the keys otherwise data changes\n\tdataSource=podListbox.data.keys()\n\t#Store the results of the search\n\tresults=[]\n\t#Search the data source\n\tfor item in dataSource:\n\t\tif searchDataSource(dataToFind,[item],capital=True,full=False):\n\t\t\tresults.append(item)\n\n\t#Add the results to screen\n\tpodListbox.delete(0,END)\n\tfor 
item in results:\n\t\tpodListbox.addExisting(item)\n\n\tif processLanguageOn:\n\t\tprocessSearchLanguage(podSearchEntry)", "def processSearchResult(self):", "def search(self):\n timed_print(\"Searching\", randint(3,7))\n if len([i for i in self.notes if not i.hidden]) == 0:\n return \"Nothing here\"\n else:\n for i in self.notes:\n screen_clear()\n i.show()\n print()\n input(\"Press Enter to continue searching\")\n return \"Nothing else here!\"", "def search(self, word: str) -> bool:\n # Checking if the word is present in the list.\n return word in self.mylist", "def sequentialSearch(target, lyst):\n position = 0\n while position < len(lyst):\n if target == lyst[position]:\n return position\n position += 1\n\n return -1" ]
[ "0.7565156", "0.751804", "0.74692243", "0.7460926", "0.7455549", "0.7395807", "0.7327136", "0.73222226", "0.72867405", "0.7258646", "0.7242936", "0.72091985", "0.7195228", "0.7178875", "0.70211923", "0.6858696", "0.6825057", "0.68024164", "0.6769936", "0.6705504", "0.67019075", "0.6627802", "0.66231114", "0.6589883", "0.65263456", "0.64923286", "0.64908993", "0.64652264", "0.64159584", "0.6412143", "0.6396773", "0.6395456", "0.6385119", "0.63331497", "0.632641", "0.632641", "0.63154286", "0.63154286", "0.6298496", "0.62333155", "0.6228041", "0.6227051", "0.6213537", "0.62132466", "0.6185107", "0.6155004", "0.6142988", "0.6140762", "0.6139529", "0.6097426", "0.60907507", "0.60879654", "0.608011", "0.6074133", "0.60612446", "0.60350347", "0.60329145", "0.6030677", "0.60128224", "0.5998616", "0.59904754", "0.5989784", "0.5982927", "0.59740263", "0.59524655", "0.59412426", "0.5916817", "0.5912689", "0.590878", "0.5903447", "0.5889277", "0.58760226", "0.586721", "0.586224", "0.58609444", "0.58569133", "0.58258456", "0.58178633", "0.5810703", "0.57922494", "0.5772159", "0.57577485", "0.57557476", "0.5752974", "0.5743963", "0.5741899", "0.57373506", "0.57369363", "0.57369363", "0.5734052", "0.5732925", "0.5732547", "0.5730625", "0.5722545", "0.5721791", "0.5707884", "0.5703728", "0.5702986", "0.56986946", "0.5695221" ]
0.7163797
14
Remove a value from the linked list.
def remove(self, val):
 current_node = self.head
 previous_node = None
 while current_node:
 if current_node.val == val:
 if previous_node:
 previous_node.next = current_node.next
 else:
 self.head = current_node.next
 previous_node = current_node
 current_node = current_node.next
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(self, value):\r\n if self.head is None:\r\n return\r\n\r\n if self.head.value == value:\r\n self.head = self.head.next\r\n return\r\n\r\n node = self.head\r\n while node.next:\r\n if node.next.value == value:\r\n node.next = node.next.next\r\n return\r\n node = node.next", "def remove_value(self, value):\n if self.head is None: \n raise ValueError('Deleting from empty list.')\n node = self.head \n if node.value == value: \n self.head = self.head.next_node \n return node \n while node.next_node is not None:\n current = node.next_node \n if current.value == value:\n node.next_node = current.next_node \n return current \n node = current\n raise ValueError('Deleting non-existing value.')", "def remove(self,value):\n if self.is_empty():\n return\n current = self._head\n if current.value == value:\n self._head = self._head.next\n elif current.next is None:\n # Contains one element only, but it is not the one we are looking for.\n return\n else:\n while current.next.value != value:\n current = current.next\n if current.next is None: # Remove value not found.\n return\n\n # Find removed value, remove it.\n current.next = current.next.next\n if current.next is None:\n self._tail = current\n self._size -= 1", "def remove_value(self, value):\n if self.head is None: \n raise ValueError('Deleting from empty list.')\n node = self.head \n if node.value == value: \n self.head = self.head.next_node \n if self.head is None: \n self.tail = None\n else:\n self.head.prev_node = None \n return node \n while node.next_node is not None:\n node = node.next_node \n if node.value == value:\n node.prev_node.next_node = node.next_node \n if node.next_node is None: \n self.tail = node.prev_node \n else:\n node.next_node.prev_node = node.prev_node\n return node\n raise ValueError('Deleting non-existing value.')", "def remove(self, value):\n node = self.first()\n # case 1 : in case of empty list, do nothing and return None\n if node is None:\n return None\n # case 2 : list has at least one element and node to be removed is the first element\n if node.value() == value:\n self.__head = node.next()\n self.__length -= 1\n node.set_next(None)\n return node\n # case 3 : list has at least one element and node to be removed is not the first element\n previous = node\n node = node.next()\n while node is not None:\n if node.value() == value:\n previous.set_next(node.next())\n self.__length -= 1\n node.set_next(None)\n return node\n else:\n node = node.next()\n return None\n\n ##############", "def delete(self, value):\n current = self.head\n prev = None\n\n while current:\n if current.value == value:\n if prev == None:\n self.head = current.next\n else:\n prev.next = current.next\n break\n prev = current\n current = current.next", "def delete(self, value):\n current = self.head\n if current.value == value:\n self.head = current.next\n else:\n while current:\n if current.value == value:\n break\n prev = current\n current = current.next\n if current == None:\n return\n prev.next = current.next\n current = None", "def delete(self, value):\n current = self.head\n previous = None\n while current.value != value and current.next:\n previous = current\n current = current.next\n if current.value == value:\n if previous:\n previous.next = current.next\n else:\n self.head = current.next\n pass", "def remove_value(self, value):\n # check the head's key\n temp_node = self.head\n if temp_node.val==value:\n self.head = temp_node.next\n temp_node = None\n self.n -= 1\n return\n\n # search for the key value\n while temp_node.val != 
value: # check the next node's key\n prev_node = temp_node # store prev node to change prev.next\n temp_node = temp_node.next\n # if the key is not found\n if temp_node == None:\n print(\"Error; key value is not found\")\n return\n else:\n # reconfigure; unlink the current node\n prev_node.next = temp_node.next\n temp_node = None\n self.n -= 1", "def remove_by_value(self, data):\n pre_node = None\n for n in self:\n if n.data == data:\n if pre_node is None:\n self.pop()\n else:\n pre_node.next = n.next\n break\n pre_node = n\n else:\n raise ValueError(f'value [{data}] not found in linked list')", "def remove(self, value):\n list.remove(self, value)\n self.emit('removed', value)\n self.emit('modified')", "def remove_node(self, value):\n node = self.head\n\n while node:\n if self.head.value == value:\n self.head = self.head.next\n return\n if node.next.value == value:\n node.next = node.next.next\n return\n node = node.next", "def remove_value(self, value):\n if self.empty():\n return \"Linked List is empty\"\n h = self.head\n previous = self.head\n idx = 0\n while h is not None:\n if h.data is value:\n if previous is h:\n self.head = h.next\n return idx\n else:\n previous.next = h.next\n h = None\n return idx\n idx += 1\n previous = h\n h = h.next\n\n pass", "def remove(self, value):\n\t\tself.__remove(self, value, None)", "def remove(self, value):\n\n list.remove(self, value)\n self.changed()", "def remove(self, value):\n pass", "def delete(self, value):\n current = self.head\n index = 1\n ''' delete first element '''\n if index == 1 and current.value == value:\n print (\"deleting first element\")\n current.next = current.next.next\n return\n \n ''' delete last element '''\n while not current.next.next and current.next.value == value:\n print (\"deleting last element\")\n current.next = None\n return\n \n ''' anywhere in between '''\n while current.next.next and current.next.value != value:\n current = current.next\n \n ''' delete the element '''\n print (\"deleting anywhere between element\")\n current.next = current.next.next\n return", "def remove(self, value): # real signature unknown; restored from __doc__\n pass", "def delete(self, value):\n # Iterating to node that has value\n node = self.head\n last_node = None\n while node is not None and node.value != value:\n last_node = node\n node = node.next_\n\n # Check if the node has been found\n if node is None:\n return\n\n # Checking whether head matched\n if last_node is None:\n self.head = node.next_\n return\n\n # Deleting node\n last_node.next_ = node.next_", "def del_value(self):\n return self.list.pop()", "def remove_from_head(self):\n\n if self.size == 0: # no elements in list\n return None # nothing to return\n\n removed_value = self.head.value # make a copy of the node to be deleted\n\n if self.size == 1: # if only one element in list (node is head and tail)\n self.head = self.tail = None # list will be empty\n\n else: # more than one element in list\n self.head = self.head.next # shift head right (reassign head to head.next)\n self.head.prev = None # reassign head.prev to point at None (it used to point at old_head)\n\n self.size -= 1\n return removed_value", "def __delitem__(self, value) -> bool: # True -> if element was deleted else False\n if not self.head:\n return False\n if self.head.value == value:\n if self.head.next_value:\n self.head = self.head.next_value\n else:\n self.head = None\n return True\n link = self.head.next_value\n prev = self.head\n while link:\n if link.value == value:\n prev.next_value = 
link.next_value\n return True\n prev = link\n link = link.next_value\n return False", "def remove(self, value):\n for i, v in enumerate(self):\n if v == value:\n self._table.pop(i); return\n raise ValueError, \"list.remove(x): x not in list\"", "def remove_value(self, key: keyType, value: valueType) -> None:\n self.validate(key, value)\n head_node_index, chain_node_index = self.exist_key(key)\n if head_node_index == -1:\n raise Exception\n if value not in self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values:\n raise Exception\n if self.hashTable[head_node_index].count == 1:\n self.hashTable[head_node_index] = HeadNode()\n elif self.hashTable[head_node_index].count > 1:\n values_number = len(self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values)\n if 1 == values_number:\n self.hashTable[head_node_index].count -= 1\n self.hashTable[head_node_index].singlyLinkedList.pop(chain_node_index)\n elif values_number > 1:\n self.hashTable[head_node_index].count -= 1\n self.hashTable[head_node_index].singlyLinkedList[chain_node_index].values.remove(value)\n else:\n raise Exception\n else:\n raise Exception", "def remove(self, item):\n \"\"\"\n :type item: Node()\n :rtype None\n \"\"\"\n if self.head.getData() == item:\n self.head = self.head.getNext()\n return\n\n prev = curr = self.head\n while curr: \n if curr.getData() == item:\n prev.setNext(curr.getNext())\n break\n prev = curr\n curr = curr.getNext()", "def remove(self, value):\n self.values.remove(value)", "def remove(self, val: Generic[T]) -> None:\n def remove_node(node: Node) -> Node: #recursive function\n if node is self.node:\n return node\n if node.val == val: #removes all nodes with value val\n next_node = node.next\n prev_node = node.prev\n\n prev_node.next = next_node\n next_node.prev = prev_node\n remove_node(node.next)\n\n remove_node(self.node.next)", "def remove(self, key):\n if self.head is None:\n print('Cannot remove from empty list!')\n return\n if self.head.data == key:\n self.head = self.head.next\n return\n\n itr = self.head\n prev = ListNode()\n while itr:\n curr = itr\n if itr.data == key:\n prev.next = curr.next\n return\n prev = curr\n itr = itr.next", "def remove(self, d):\n\n if self.head is not None:\n if self.head.data == d:\n self.head = self.head.next\n else:\n temp = self.head\n while temp.next is not None:\n if temp.next.data == d:\n temp.next = temp.next.next\n break\n else:\n temp = temp.next", "def remove(self, value) -> None:\n key = getattr(value, self.keyattr)\n if callable(key):\n key = key()\n with suppress(ValueError):\n self.data[key].remove(value)\n self.size -= 1", "def removeNodesByValue(self, value): # Class O(nlog2n)\r\n # I'm assuming this classification because this function\r\n # calls removeNode()\r\n h = self.head\r\n count = 1\r\n while count <= self.length():\r\n try:\r\n if h.value == value:\r\n self.removeNode(count)\r\n if h.next != h:\r\n h = h.next\r\n next\r\n else:\r\n count += 1\r\n h = h.next\r\n except:\r\n break", "def delete_by_value(self, key):\n cur_node = self.head\n\n if cur_node and cur_node.data == key:\n self.head = cur_node.next\n cur_node = None\n prev = None\n while cur_node and cur_node.data != key:\n prev = cur_node\n cur_node = cur_node.next\n if cur_node is None:\n return\n prev.next = cur_node.next\n cur_node = None", "def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if self.head == None:\n return None\n\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n 
temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next", "def remove(self, data):\n\n traverse = self.head\n temp = self.head\n if traverse.data == data:\n self.head = traverse.next\n return\n\n while traverse.next != None:\n\n temp = traverse.next\n if temp.data == data:\n traverse.next = temp.next\n return\n\n traverse = traverse.next", "def remove(self, item):\n \n previous = None\n current = self.head\n \n while current is not None:\n \n if current.get_data() == item:\n # If the item to be removed is the first item\n if previous is None:\n self.head = current.get_next()\n else:\n previous.set_next(current.get_next())\n return\n \n else:\n previous = current\n current = current.get_next()", "def remove(self, value):\n found = False\n for i in range(len(self.data)):\n if self.data[i] != value:\n pass\n else:\n found = True\n self.__delitem__(i)\n break\n if not found:\n raise ValueError", "def remove(self, item: Any) -> None:\n curr = self._first\n\n if not curr:\n raise ValueError\n\n elif curr.item == item:\n self._first = self._first.next\n self._length -= 1\n\n else:\n while curr is not None:\n if curr.next and curr.next.item == item:\n curr.next = curr.next.next\n self._length -= 1\n return\n curr = curr.next\n raise ValueError", "def remove(self , element):\n current = self.head \n previous = None\n\n while current and current.data != element:\n previous = current\n current = current.next\n\n if previous == None :\n self.head = current.next\n elif current :\n previous.next = current.next\n current.next = None", "def remove(self, val):\n i = self.d.get(val)\n if i is None:\n return False\n assert 0 <= i < len(self.l)\n last_val = self.l[-1]\n if val != last_val:\n self.d[last_val] = i\n self.l[i] = last_val\n del self.d[val]\n _ = self.l.pop()\n return True", "def delete(self, node):\n\n if self.size == 0: # if list is empty\n return None # nothing to delete\n\n removed_value = node.value # copy deleted node's value\n\n if self.size == 1: # if only one item in list\n self.head = self.tail = None\n self.size -= 1\n\n else: # more than one element in list\n if self.head is node: # node to delete is head\n self.head = node.next # reassign head to be element after head\n\n elif self.tail is node: # node to delete is tail\n self.tail = node.prev # reassign tail to be element before tail\n\n else: # node is neither head nor tail, putting it somewhere in the middle\n node.prev.next = node.next\n node.next.prev = node.prev\n\n node.next = node.prev = None\n self.size -= 1\n\n return removed_value", "def remove(self, key):\n if self.head.data == key: # checking first corner case of first node to be removed\n self.head = self.head.next\n return\n\n elif self.head is None: # checking second corner case of linked list being empty\n return\n\n else: # otherwise maintain two pointers and remove the required node\n curr_node = self.head.next\n prev_node = self.head\n while prev_node.next is not None:\n if curr_node.data == key:\n prev_node.next = curr_node.next\n return\n\n return", "def del_node(self, val):\n try:\n del self[val]\n for key in self:\n if val in self[key]:\n self[key].remove(val)\n except KeyError:\n raise ValueError('Value not in graph')", "def remove(self, key):\n current = self.head\n previous = None # Para guardar la referencia al nodo previo conforme se atraviesa la lista\n found = False # Servirá como una stopping condition\n # El ciclo atravesará la lista mientras found sea False, cuando se encuentre el valor, found 
tomará el valor True y el ciclo terminará\n while current and not found:\n if current.data == key and current is self.head: # Se ejecuta si el valor a remover está en la head\n found = True\n self.head = current.next_node\n\n elif current.data == key: # Cuando el valor de key es igual al valor de otro nodo que no está en head\n found = True\n previous.next_node = current.next_node # La referencia al siguiente nodo después de previous es la del nodo después del nodo que se va a remover\n else:\n previous = current # Para hacer un seguimiento del nodo previo\n current = current.next_node # El nodo actual es el siguiente del nodo analizado, para seguir atravesando la lista\n \n return current", "def delete(self, value):\n if len(self.h) > 1 and value:\n\n # find value\n if value in self.d:\n del_idx = self.d[value]\n else:\n del_idx = None\n\n # delete the thing\n if del_idx:\n\n if del_idx == (len(self.h) - 1):\n # last element\n # [None, 1] -> [None]\n self.d[self.h[del_idx]] = None\n self.h.pop()\n else:\n # nth element\n # [None, 1, .. n] -> [None, .. n]\n self.h[del_idx], self.h[len(self.h) - 1] = self.h[len(self.h) - 1], self.h[del_idx]\n self.d[self.h[del_idx]] = del_idx\n self.d[self.h[len(self.h) - 1]] = None\n self.h.pop()\n self.bubble_down(del_idx)", "def delete(self, value):\n if len(self.h) > 1 and value:\n\n # find value\n if value in self.d:\n del_idx = self.d[value]\n else:\n del_idx = None\n\n # delete the thing\n if del_idx:\n\n if del_idx == (len(self.h) - 1):\n # last element\n # [None, 1] -> [None]\n self.d[self.h[del_idx]] = None\n self.h.pop()\n else:\n # nth element\n # [None, 1, .. n] -> [None, .. n]\n self.h[del_idx], self.h[len(self.h) - 1] = self.h[len(self.h) - 1], self.h[del_idx]\n self.d[self.h[del_idx]] = del_idx\n self.d[self.h[len(self.h) - 1]] = None\n self.h.pop()\n self.bubble_down(del_idx)", "def pop(self):\n current = self.head\n new_head = current.next\n self.head = new_head\n return current.val", "def remove(self, key: int) -> None:\n index = key % self.size\n if self.table[index].value is None:\n return \n \n p = self.table[index]\n \n if p.key == key:\n if p.next is None:\n self.table[index] = ListNode()\n else:\n self.table[index] = p.next\n return\n \n prev = p\n while p:\n if p.key == key:\n prev.next = p.next\n return\n prev = p\n p = p.next\n #p = p.next\n #prev = p\n #prev, p = p, p.next", "def removeItem(self, value):\n\t\tif self._linkHead == None:\n\t\t\treturn False\n\n\t\tif self._linkHead._itemValue == value:\n\t\t\tif self._linkHead == self._linkTail:\n\t\t\t\tself._linkHead = None\n\t\t\t\tself._linkTail = None\n\t\t\telse:\n\t\t\t\tself._linkHead = self._linkHead._itemNext\n\t\t\t\tself._linkHead._itemPre = None\n\n\t\t\treturn True\n\n\t\t_nodeCursor = self._linkHead\n\n\t\twhile _nodeCursor != None and _nodeCursor._itemValue != value:\n\t\t\t_nodeCursor = _nodeCursor._itemNext\n\n\t\tif _nodeCursor != None:\n\t\t\tif _nodeCursor == self._linkTail:\n\t\t\t\tself._linkTail = _nodeCursor._itemPre\n\t\t\t\tself._linkTail._itemNext = None\n\t\t\telse:\n\t\t\t\t_nodeCursor._itemPre._itemNext = _nodeCursor._itemNext\n\t\t\t\t_nodeCursor._itemNext._itemPre = _nodeCursor._itemPre\n\n\t\t\treturn True\n\n\t\treturn False", "def remove(self, value):\r\n if value not in self:\r\n raise KeyError(value)\r\n self.discard(value)", "def remove(self, data):\n prev = None\n curr = self.head\n while curr != None:\n if curr.data == data:\n self.size -= 1\n if curr == self.head:\n self.head = curr.next\n else:\n prev.next = curr.next\n if curr == 
self.tail:\n self.tail = prev\n return curr\n else: \n prev = curr\n curr = curr.next\n return None", "def remove(self, key: int) -> None:\n \n \n hashvalue=key% 1000\n if self.hashset[hashvalue]==None:\n return\n head = self.hashset[hashvalue]\n dummy_head = Node(0)\n curr = dummy_head\n while head:\n k,v = head.data\n if k==key:\n head=head.next\n curr.next=head\n curr= curr.next\n if head != None:\n \n head = head.next\n \n self.hashset[hashvalue]=dummy_head.next", "def remove(self, key: int) -> None:\n \n index = self.hash(key)\n\n # If list doesn't exist just return\n if not self.map[index]: return\n\n # If it's the head of list, manipulate pointers\n if self.map[index].val[0] == key:\n self.map[index] = self.map[index].next\n return\n\n curr = self.map[index]\n\n # Search through list\n while curr.next:\n # If the value in list matches key, manipulate list\n if curr.next.val[0] == key: \n curr.next = curr.next.next\n return\n\n curr = curr.next\n\n # Otherwise if it's not in list do nothing", "def remove(self):\r\n if self.first() is not None:\r\n self.dec_size()\r\n self.set_first(self.first().next())\r\n if self.size() == 0: # when there are no more elements in the list,\r\n self.__last = None # remove the pointer to the last element\r", "def remove(self, val):\n ind = self.table.pop(val, None)\n if ind is None:\n return False\n key = self.ls.pop()\n if len(self.ls)!=0 and len(self.ls) != ind:\n self.ls[ind] = key\n self.table[key] = ind\n return True", "def remove(self, val):\n if val in self.dict_val:\n list_index = self.dict_val[val]\n last_ele_index = len(self.list_val) -1\n if list_index == last_ele_index:\n self.dict_val.pop(val)\n self.list_val.pop()\n else:\n self.dict_val[self.list_val[last_ele_index]] = list_index\n self.list_val[list_index], self.list_val[last_ele_index] = self.list_val[last_ele_index], self.list_val[list_index]\n self.dict_val.pop(val)\n self.list_val.pop()\n # for index in range(list_index, len(self.list_val)):\n # self.dict_val[self.list_val[index]] -= 1\n # self.dict_val.pop(val)\n # self.list_val.pop(list_index)\n return True\n else:\n return False", "def remove(self, item):\n \n previous = None\n current = self.head\n \n while current is not None:\n \n if current.get_data() == item:\n # If the item to be removed is the first item\n if previous is None:\n self.head = current.get_next()\n else:\n previous.set_next(current.get_next())\n return\n \n # Early stop\n elif current.get_data() > item:\n return\n \n else:\n previous = current\n current = current.get_next()", "def remove(self, element):\n\n currentNodePointer = self.head\n # case where the first node has the element as value then erase the value\n if(currentNodePointer.getData() == element):\n self.head = self.head.getNext()\n return True\n \n while(currentNodePointer.getNext() is not None):\n if(currentNodePointer.getNext().getData() == element):\n currentNodePointer.setNext(currentNodePointer.getNext().getNext())\n return True\n else:\n currentNodePointer = currentNodePointer.getNext()\n return False", "def remove(self,valor):\n\n if self.size==0:\n return False\n else:\n current=self.first\n try:\n while current.next.valor!=valor:\n current=current.next\n deleted_node=current.next\n current.next=deleted_node.next\n except AttributeError:\n return False\n self.size-=1\n return deleted_node", "def remove(self, key):\r\n\r\n\t\t# if the key doesn't exist, exit the function\r\n\t\tif not self.contains_key(key):\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\tindex = self.get_index(key) # get the index of 
the key\r\n\t\t\tlinked_list = self._buckets[index] # now get the entire linked list\r\n\t\t\tlinked_list.remove(key) # call the remove function from the linked list\r\n\t\t\tself.size -= 1 # subtract 1\r", "def remove_a_specific_item(self, index):\n\n current = self.head\n previous = None\n for i in range(index):\n previous = current\n current = current.next\n if previous is None: self.head = current.next\n else: previous.next = current.next\n self.size -= 1", "def remove(self, val):\n if self.lookup.get(val, 0) > 0:\n self.lookup[val] = self.lookup.get(val, 0) - 1", "def remove(self, val: int) -> bool:", "def remove(self, e):\n try:\n del self.vals[e]\n except:\n return", "def remove(self, value):\n # Using a slice assignment (children[:] =) the list is modified instead of assign the name to a new list (children =).\n self.children[:] = (child for child in self.children if child.value != value)", "def remove(self, node):\n current = self.head\n target_node = node\n if target_node == current:\n self.pop()\n elif target_node == self.tail:\n self.shift()\n else:\n while current.next_node:\n try:\n if current.next_node == target_node:\n next_node = current.next_node\n # current.next_node = target_node.next_node\n next_node = target_node.next_node\n # target_node = current.next_node\n next_node.previous_node = current.previous_node\n break\n current = current.next_node\n except AttributeError:\n pass\n else:\n raise AttributeError", "def remove_recursive(self, value, node=None):\n if node == None:\n node = self.head\n\n if node.value == value:\n if node.prev:\n node.prev.next = node.next\n else:\n self.head = node.next\n if node.next:\n node.next.prev = node.prev\n elif node.next:\n self.remove_recursive(value, node.next)", "def remove(self, index=0):\n # Error case: Index out of acceptable range\n if index < 0 or index >= self._size:\n raise RangeError(\"index out of range.\")\n\n # Edge case: Remove from front of list\n # Behave list pop_front()\n if (index == 0):\n return self.pop_front()\n\n # Edge case: Remove from end of list\n # Behave list pop_back()\n if (index == self._size - 1):\n return self.pop_back()\n\n i = 1\n current_node = self._head.next\n\n while(i < index):\n current_node = current_node.next\n i += 1\n\n current_node.prev.next = current_node.next\n current_node.next.prev = current_node.prev\n self._size -= 1\n\n return current_node.value", "def remove(self, val):\n node = self.search(val)\n if not node:\n return\n if node.left and not node.right:\n self._remove_parent(node.left)\n self.size_number -= 1\n return\n elif node.right and not node.left:\n self._remove_parent(node.right)\n self.size_number -= 1\n return\n nxt = None\n nxt = self._nxt_inorder(nxt, node, val)\n if nxt is None:\n try:\n self._redirect(node, None)\n except AttributeError:\n self.root = None\n self.size_number -= 1\n return\n self.remove(nxt.data)\n self._replace_node(nxt, node)\n\n # check balance on parent of the node we just removed", "def remove(self, key):\n index = self._hash_mod(key)\n node = self.storage[index]\n node_before = None\n if node:\n while node:\n if node.key == key:\n if node_before:\n node_before.next = node.next\n elif node.next:\n self.storage[index] = node.next\n else:\n self.storage[index] = None\n self.key_count -= 1\n return\n node_before = node\n node = node.next\n print(f\"An element with key '{key}' cannot be found!\")", "def remove(self, index):\n self.__validate_index(index)\n value = self.__list[index]\n self.__list = self.__list[:index] + self.__list[index + 1:]\n 
return value", "def remove(self, key):\n\n node = self._get_node(key) # Check to see if the key is in the table\n if node is None: # Key is not in the table (do nothing)\n return\n\n index = self._get_index(key) # Get the index for the LinkedList\n node = self._buckets[index].head # Start at the head of the LinkedList\n\n if node.key == key: # Handle the case where key is at the head\n self._buckets[index].head = node.next\n\n else:\n previous = node\n current = node.next\n while current.key != key: # Find the link with the right key\n previous = current\n current = current.next\n previous.next = current.next # Cut the link out of the list\n\n self.size -= 1", "def remove(self, value):\n self.arr.remove(value)", "def remove(self, node):\n if type(node) is Node:\n prev = None\n curr = self.head\n while curr:\n if curr is node:\n if prev:\n prev.next = curr.next\n else:\n self.head = curr.next\n self._length -= 1\n break\n prev = curr\n curr = curr.next\n else:\n raise ValueError(\"Cannot remove node not in list.\")\n else:\n raise ValueError(\"Argument to remove must be of node type.\")", "def remove(self, element):\n if self.head.element == element:\n self.head = self.head.next\n self.head.prev = None\n return None\n cursor = self.head\n while cursor.next is not None:\n if cursor.next.element == element:\n cursor.next = cursor.next.next\n if cursor.next is not None:\n cursor.next.prev = cursor\n break\n else:\n cursor = cursor.next", "def pop(self):\n if not self.head:\n raise IndexError('Cannot pop from empty linked list.')\n\n popped_value = self.head.val\n self.head = self.head.next\n return popped_value", "def remove(self, item):\n\t\tif self.len == 0:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\tself.borrar_primero()\n\t\t\treturn\n\t\tanterior = self.prim\n\t\tactual = anterior.prox\n\t\twhile actual and actual.dato != item:\n\t\t\tanterior = anterior.prox\n\t\t\tactual = actual.prox\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\tanterior.prox = actual.prox\n\t\tself.len -= 1", "def pop(self):\n removed_node = self.top\n self.top = self.top._next\n self._size -= 1\n return removed_node.val", "def remove(self, val: int) -> bool:\n if val in self.l:\n self.l.remove(val)\n return True\n return False", "def remove(self, key: int) -> None:\n pos = key % self.space\n head = self.hash_table[pos]\n curr = head\n\n while curr.next:\n if curr.next.key == key:\n curr.next = curr.next.next\n return\n curr = curr.next", "def remove(self, val):\n temp = self.table.pop(val, None)\n if temp is None:\n return False\n return True", "def pop(self):\n if self.is_empty():\n raise ValueError\n\n item = self.linked_list.head\n self.linked_list.head = item.next\n\n item.next = None\n\n self.linked_list.node_count -= 1\n\n return item.data", "def remove(self, val):\n # if it doesn't exist return error \n if val not in self.inds:\n return False\n # find the index for the val in list, and take the last element\n ind, temp = self.inds[val], self.nums.pop() \n # if the one to delete is not the last number \n if ind < len(self.nums):\n # place the last element at where the val was\n # and update the index for the last element\n self.nums[ind], self.inds[temp] = temp, ind\n # delete the index for val\n del self.inds[val]\n return True", "def remove(self, val: int) -> bool:\n        if val in self.hashmap:\n            temp=self.list[-1]\n            self.list[-1],self.list[self.hashmap[val]]=self.list[self.hashmap[val]],self.list[-1]\n            
self.hashmap[temp]=self.hashmap[val]\n            self.list.pop()\n            del self.hashmap[val]\n            return True\n        return False", "def delete_value(self, value):\n del self.index[value]", "def remove(self, item):\n \n if self.head is None:\n raise EmptyListError\n \n if self.head.data == item:\n self.remove_head()\n return print(f'{item} removed')\n \n prev_node, curr_node = None, self.head \n \n while curr_node is not None:\n if curr_node.data == item:\n prev_node.next = curr_node.next\n return print(f'{item} removed')\n prev_node = curr_node\n curr_node = curr_node.next\n \n print(\"item not found\")", "def remove_from_tail(self):\n\n if self.size == 0: # if list is empty\n return None # nothing to remove; return out\n\n tail_to_remove = self.tail # copy value of current tail before deletion (for return)\n tail_to_remove.prev = tail_to_remove.next = None # remove any ties to list\n\n if self.size == 1: # if only one item in list\n self.head = self.tail = None # list will now be empty\n\n else:\n self.tail.prev.next = None # reassign new tail's prev to None (last item)\n self.tail = self.tail.prev # shift tail left\n\n self.size -= 1 # decrease size (deleting el)\n return tail_to_remove.value # return value of removed tail", "def remove(self, key: int) -> None:\n hashKey = key % 1000\n prev = node = self.bucket[hashKey]\n if not node: return\n if node.pair[0] == key:\n self.bucket[hashKey] = node.next\n else:\n node = node.next\n while node:\n if node.pair[0] == key:\n prev.next = node.next\n break\n else:\n prev, node = prev.next, node.next", "def remove(self,index=0):\n if index>self.size-1: raise IndexError(\"Index out of range.\")\n elif self.size==1: self.reference=None\n else:\n pointer = self.reference\n for i in range(index): pointer = pointer.next\n pointer.previous.next, pointer.next.previous = pointer.next, pointer.previous\n if index==0: self.reference=self.reference.next\n self.size-=1", "def del_node(self, val):\n try:\n del self[val]\n for key in self:\n if val in self[key]:\n del self[key][val]\n except KeyError:\n raise ValueError('Value not in graph')", "def delete(self):\n if self.head is None:\n return None\n item = self.head.data\n self.head = self.head.next\n return item", "def remove(self, key):\n # O(1) in best case and O(n) in worst case Time Complexity\n # O(1) in best case and O(n) in worst case Space Complexity\n\n currentNode = self.getElement(key)\n if (currentNode.next != None):\n currentNode.next = currentNode.next.next\n return", "def _del(self, key: int) -> int:\n node = self.htab.pop(key)\n node.prev.next = node.next\n node.next.prev = node.prev\n return node.val", "def removeNode(self, node__to__remove): # Class O(nlog2n)\r\n # This is clear the worst function. It goes to different if statements before\r\n # start the 'real' computation to replace the value\r\n if node__to__remove > self.length():\r\n raise ValueError(\"Invalid position. 
The LinkedList has length %s\" % self.length())\r\n elif node__to__remove == 1:\r\n if self.length() == 1:\r\n raise ValueError(\"The LinkedList has only one node (the head)\")\r\n if self.length() == 2:\r\n self.head = Node(self.head.next)\r\n else:\r\n self.head = Node(self.head.next, self.head.next.next)\r\n elif (self.length() - 1) == node__to__remove:\r\n h = self.head\r\n count = 1\r\n while count != (node__to__remove - 1):\r\n h = h.next\r\n count += 1\r\n h.next = Node(h.next.next)\r\n elif self.length() == node__to__remove:\r\n h = self.head\r\n count = 2\r\n while count != (node__to__remove - 1):\r\n h = h.next\r\n count += 1\r\n h.next = Node(h.next)\r\n else:\r\n h = self.head\r\n count = 2\r\n while count != node__to__remove:\r\n h = h.next\r\n count += 1\r\n h.next = Node(h.next.next, h.next.next.next)", "def remove_value(self, value: Hashable) -> bool:\n\t\treturn self.remove_values([value])", "def pop(self):\n if self.head is None:\n raise IndexError(\"Cannot pop from an empty linked list.\")\n first = self.head.val\n self.head = self.head.next\n self._length -= 1\n return first", "def remove(self, val: int) -> bool:\n if val not in self.dict:\n return False\n last_ele, idx = self.list[-1], self.dict[val]\n self.list[idx], self.dict[last_ele] = last_ele, idx\n\n self.list.pop()\n del self.dict[val]\n return True", "def remove(self, val: int) -> bool:\n if val not in self.dict: return False\n \n index_of_removing_element = self.dict[val]\n last_element = self.list[-1]\n # put list last element into that index \n self.list[index_of_removing_element] = self.list[-1]\n \n # change index of last element which got swapped\n self.dict[last_element] = index_of_removing_element\n \n self.list.pop()\n del self.dict[val]\n # print(\"remove\",val, \"==>\", self.dict, self.list)\n return True", "def remove_node(self, data):\n if not self.head:\n raise Exception(\"List is empty\")\n if self.head.data == data:\n self.head = self.head.next\n return\n previous_node = self.head\n for node in self:\n if node.data == data:\n previous_node.next = node.next\n return\n previous_node = node\n raise Exception(\"Node with data '{}' not found\".format(data))", "def remove(self, e):\r\n try:\r\n self.vals.remove(e)\r\n except:\r\n raise ValueError(str(e) + ' not found')", "def pop(self):\n value = self.head.value\n try:\n if self.count > 1:\n self.head = self.head.previous\n self.head.next = None\n else:\n self.tail = None\n self.head = None\n self.count -= 1\n except AttributeError:\n AttributeError(u\"DLL is empty.\")\n return value" ]
[ "0.85559684", "0.8450625", "0.84335315", "0.828074", "0.819369", "0.8189939", "0.81423485", "0.80990094", "0.8078319", "0.7913155", "0.79033136", "0.7874182", "0.78658223", "0.78636545", "0.78086895", "0.7679407", "0.7675178", "0.7667091", "0.7643101", "0.75630695", "0.7549783", "0.7405654", "0.7336735", "0.7325203", "0.7305143", "0.7269773", "0.7257864", "0.7240768", "0.72272146", "0.7211016", "0.7210282", "0.7197837", "0.7180783", "0.71575946", "0.7113567", "0.7109958", "0.7107002", "0.7105292", "0.7081985", "0.7067516", "0.7059052", "0.70582044", "0.705089", "0.70434004", "0.70434004", "0.70393926", "0.70300144", "0.7012894", "0.69706434", "0.6967009", "0.69459975", "0.69435185", "0.6936383", "0.6931439", "0.6930388", "0.69269973", "0.69109136", "0.69045085", "0.6887625", "0.6868487", "0.68673146", "0.685131", "0.6831417", "0.6830969", "0.6820613", "0.6808257", "0.6796515", "0.6791006", "0.67907643", "0.6784406", "0.6773146", "0.67711157", "0.6754475", "0.674975", "0.67467594", "0.6743065", "0.67412794", "0.67299837", "0.67295736", "0.6728883", "0.67286545", "0.67269427", "0.6724067", "0.6722911", "0.6719043", "0.67121154", "0.67114496", "0.6687172", "0.66855437", "0.66780055", "0.6674495", "0.6666759", "0.6665636", "0.6658591", "0.66511285", "0.66482514", "0.6639307", "0.6610437", "0.66062677", "0.65927166" ]
0.8185222
6
Just check that the `_data_changed` parameter is added to the response.
def test_returned_data_changed(self): request = RequestFactory().get('/') admin_instance = get_modeladmin(Iframe) response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/') new_response = admin_instance.maybe_fix_redirection( request=request, response=response_302) # returned early because it was a redirect, but we updated the # querystring anyway self.assertEqual(new_response['X-Chunkadmin-Response'], 'early') self.assertEqual(302, new_response.status_code) self.assertEqual('/admin_mountpoint/?_data_changed=1', new_response['Location'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_data(self, data):\n self.logger.debug(\"handle_data()\")\n self.response_data.append(data)", "def data_changed(self):\n return", "def check_for_new_data(self):\n return", "def data_changed(self):\n self.data_changed_signal.emit(self)", "def on_new_data(self, data):\n raise NotImplementedError()", "def update(self, data):\n if self.service is not None:\n self.service.update_response(data)", "def data_received(self, data):\n pass", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def _data_updated_callback(self, attr, old, new):\n pass", "def update_response(self, response):\n\n if self.resource['operation'] in PyMongoEvent.INSERT_OPERATIONS:\n self.handle_insert_operations_response(response)\n\n elif self.resource['operation'] in PyMongoEvent.FILTER_OPERATIONS:\n self.handle_filter_operations_response(response)", "def has_changed(self):\n return bool(self.changed_data)", "async def _receive_updated_response(self, data):\n serialized_text_responses = await serialize_text_algo_api_response(data)\n await self.send_serialized_data(serialized_text_responses)", "def has_changed(self, initial, data):\n # For purposes of seeing whether something has changed, None is\n # the same as an empty dict, if the data or initial value we get\n # is None, replace it w/ {}.\n initial_value = self.to_python(initial)\n return super().has_changed(initial_value, data)", "def _validate_update_data(self, data):\n return", "def _hook_data(self, data):\n if self.data_hook is not None:\n self.data_hook(data)", "def response_change(self, request, obj):\n if '_custom_action' in request.POST:\n pass\n return super().response_change(request, obj)", "def change_data(self):\n\n if self.changed is not True:\n self.changed = True\n print('True')", "def response_received(self, ignored):\n self._received += 1", "def patch_data():\n return json.loads('{\"success\":true, \"message\":\"Field of data updated (but not really)\" }')", "def response_add(self, request, obj):\n if '_custom_action' in request.POST:\n pass\n return super().response_add(request, obj)", "def update_existing_key_fail(self, data, new_data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n rv = self.update_fail(data, message)\n assert self.verify_object(new_data)\n return rv", "def handle_data(self, d):\n self.result.append(d)", "def updateData(self):\n self.needsData.emit(self.property(\"number\"))", "def MyDataChangedCallback(self, inRefcon):\r\n pass", "def handle_data(self, data):\r\n self.fed.append(data)", "def set_data(self, data):\n\n if not self.has_data and not self.has_unsent_data:\n self.data = data\n self.has_data = True\n return True\n else:\n return False", "def on_endpoint_final(self, data):\n logger.debug('Final data from endpoint')", "def _callback(self, data: list):\n self.data = data", "def update_data():\n pass", "def add_to_response_json(data):\n if type(data) == dict:\n response_json.response.update(data)\n return response_json.response\n return {'update': 'failed', 'reason': 'data provided was not formatted as json'}", "def process_response(request, response):\n # A higher middleware layer may return a request which does not contain\n # messages storage, so make no assumption that it will be there.\n if hasattr(request, '_events'):\n # noinspection PyProtectedMember\n unstored_events = request._events.update(response)\n if unstored_events and 
settings.DEBUG:\n raise ValueError('Not all temporary events could be stored.')\n return response", "def on_data(self, data):\r\n if 'in_reply_to_status_id' in data:\r\n self.keep_or_update_tgid()\r\n self.insert_data(data)", "def on_signature_response(self, cache, new_message, changed):\n # TODO: we should ensure that new_message is correct (i.e. all checks made above)\n\n if new_message:\n self._statistic_outgoing_signature_request_success += 1\n # self._observation(new_message.candidate, cache.members[0], time())\n\n assert cache.request.payload.message.meta == new_message.meta\n return True\n\n else:\n self._statistic_outgoing_signature_request_timeout += 1\n self.remove_from_slope(cache.members[0])\n return False", "def main_response(self, data):", "def main_response(self, data):", "def handle_data(self, data):\n if len(self.current_tags) > 0:\n self.current_tags[-1].add_data(data)", "def has_data_changed(orig_data, final_data):\n debug = False\n if debug:\n print(f\"\\n{pprint.pformat(orig_data)}\\n{pprint.pformat(final_data)}\")\n data_changed = False\n final_fldnames = set()\n for final_dict in final_data:\n final_fldnames.add(final_dict[mg.TBL_FLDNAME])\n if (final_dict[mg.TBL_FLDNAME] != final_dict[mg.TBL_FLDNAME_ORIG] \n or final_dict[mg.TBL_FLDTYPE] != final_dict[mg.TBL_FLDTYPE_ORIG]):\n if debug: print('name or type changed')\n data_changed = True\n break\n ## get fld names from orig_data for comparison\n orig_fldnames = set([x[0] for x in orig_data])\n if orig_fldnames != final_fldnames:\n if debug: print('set of field names changed')\n data_changed = True\n return data_changed", "async def data_received(self, data: bytes) -> None:\n\n self.response_message.set_result(data)", "def add_success(self, data):\n rv = self.post(self.add_url, data)\n assert not in_response(rv, 'Add {}'.format(self.nice_name))\n assert self.verify_object(data)\n return rv", "def data_dict_update(self, change):\n self.data_dict = change['value']", "def store_response(self, new_response):\n self.responses.append(new_response)", "def on_response(self, response):\n pass", "def on_data(self, data):\n # store the data\n self._storage.append(data)", "def __observe_callback(self, response):\n\n # XXX: when client closes the last response is a NoneType\n if response is None:\n return\n elif coap_response_success(response):\n event = self.make_event_with_raw_data(response.payload)\n log.debug(\"received content update for observed resource: %s\" % self.remote_path)\n if self.policy_check(event):\n self.publish(event)\n\n self._last_observe_response = response\n return True\n else:\n # TODO: handle error codes and try to re-observe?\n # TODO: switch to polling if observe isn't supported by the server\n log.debug(\"unsuccessful observe request with code: %s. 
Retrying later...\" % coap_code_to_name(response.code))\n self.timed_call(self._timeout, self.__class__.observe_topic)\n return False", "def data_received(self, data):\n # self.debug(\"received data=%r\", binascii.hexlify(data))\n self.dispatcher.add_data(data)", "def handle_data(self, data):\n if self.article_body:\n if not self.suspend_acquisition:\n self.article_data += data", "def check_released(self, data, suffix=''): # pylint: disable=unused-argument\n # There aren't currently any server-side error conditions we report to the client,\n # but we send success/msg values anyway for consistency with other handlers.\n return {\n 'success': True, 'msg': u'',\n 'is_released': self.is_released()\n }", "def existing_key_fail(self, data, new_data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)", "def has_data(self, *args, **kwargs):\n return False", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def check_response_valid_update(response: HTTPResponse) -> bool:\n return response.status_code == 200", "def collect_incoming_data(self, data):\n self.l.debug('data -> (%d bytes):\"%s\"', len(data), data)\n self.received_data.append(data)", "def update_success(self, data, new_data):\n self.add_success(data)\n return self.edit_success(data[self.id_field], new_data)", "def has_data(self):\n return len(self.data) > 0", "def attempted_change(self):\n return any(self._get_field_data())", "def _cache_response(self, packet):\n self.operator.update_message(packet.message_id, packet.from_node, packet.ret_parameters)", "def updates_check():\n data = wait_for_callback(client, cb_updates_name)\n self.assertTrue(isinstance(data, dict))", "def has_data(self):\n return self._data is not None", "def on_data(self, callback, remove=False):\n self._data_handlers.register_callback(callback, remove=remove)", "def handleResponseEnd(self):\r\n try:\r\n if not self._finished:\r\n reactor.callInThread(\r\n self.resource.cacheContent,\r\n self.father,\r\n self._response,\r\n self.buffer\r\n )\r\n proxy.ProxyClient.handleResponseEnd(self)\r\n except RuntimeError:\r\n # because we don't care if the user hits\r\n # refresh before the request is done\r\n pass", "def should_refresh_client_fnc(response):\n return not response", "def is_data_response(resp: Response, data_field: str = None) -> bool:\n\n return \\\n resp and \\\n is_dict(resp) and \\\n is_success_response(resp) and \\\n resp.get(\"data\") is not None and \\\n (not data_field or data_field in resp.get(\"data\"))", "def response_add(self, request, obj):\r\n\r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n\r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n\r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_add(request, obj)", "def set(self, data):\n ret = self._rest_call(data, 'POST')\n return ret[0] == 200", 
"def handle_actual_updated(self):\n self._actual_updated()", "def data_received(self, data):\n self.timer.cancel()\n logger.debug('-> Data received: {!r}'.format(data))\n response = self.handle_data(data)\n if response is not None:\n logger.debug('<- Sending response: {!r}'.format(response))\n self.transport.write(response)", "def rDataChanged(self):\n\n self._queues.uResolutionTab.refreshData()\n self._layerManager.updateReviewLayer()", "def collect_incoming_data(self, data):\n self.logger.debug('collect_incoming_data() -> (%d)\\n\"\"\"%s\"\"\"', len(data), data)\n self.received_data.append(data)", "def hadChanged(self):\n return self.changed", "def update(self, data):\n return data", "def put_data():\n return json.loads('{\"success\":true, \"message\":\"Data updated (but not really)\" }')", "def update_response(self, response):\r\n self.stri.update_response(response)", "def update_response(self, response):\r\n self.stri.update_response(response)", "def process_data_callback(self, res):\n self.current_in_progress -= 1", "def validate_response(self, response):\n pass", "def XPLMDataChanged_f(inRefcon):", "def data(self):\n if not self.response or self.should_refresh_client():\n self.refresh_client()\n return self.response", "def on_data(self, data):\n if 'in_reply_to_status_id' in data:\n status = tweepy.Status.parse(self.api, json.loads(data))\n if self.on_status(status,data) is False:\n return False\n elif 'delete' in data:\n delete = json.loads(data)['delete']['status']\n if self.on_delete(delete['id'], delete['user_id']) is False:\n return False\n elif 'limit' in data:\n if self.on_limit(json.loads(data)['limit']['track']) is False:\n return False", "def isDataChanged(self):\n return bool(self._dirty or self.ui.channelEditor.getQModel().isDataChanged() or self._dirtyMntGrps)", "def has_data(self):\n\n return self._data is not None", "def has_data(self):\n\n return self._data is not None", "def changed_event(self):\n return True", "def data_status(self, data_status):\n self._data_status = data_status", "def _response_handler_callback(response):\n response_data = json.loads(response)\n if ('status' in response_data and response_data['status'] != 1) or ('status' not in response_data):\n Mixpanel.LOGGER.warning(\"Bad API response: \" + response)\n raise RuntimeError('Import or Update Failed')\n Mixpanel.LOGGER.debug(\"API Response: \" + response)", "def patch_json(self, data):\n def hook(response):\n response.patch_json(data)\n return self.dynamic_hook(hook)", "def on_data(self, data):\n self.tweets.append(data)\n if len(self.tweets) >= self.batch_size:\n self.write_to_pubsub(self.tweets)\n self.tweets = []\n self.count += 1\n\n if (self.count % 1000) == 0:\n print('count is: {} at {}'.format(\n self.count, datetime.datetime.now())\n )\n return True", "def __itemChanged(self, event):\n if event in (items.ItemChangedType.DATA, items.ItemChangedType.MASK):\n self._updateFromItem()", "def has_data_appended(self):\n return self._incompat_flags[0] & 0x1", "def listener(self, proxy, changed_properties, invalidated_properties):\n metadata = changed_properties.lookup_value('Metadata')\n # do not signal if the metadata is empty\n self.process_metadata(metadata, False)", "def process_commit_request(self):\n global data_store\n data_store.commit_changes()\n return 204", "def listenerInform(self, data):\n return self._listenDataList.append(data)", "def updateResponseEmbeddedData(self, SurveyID, ResponseID, ED, **kwargs):\n if not self.request(\n \"updateResponseEmbeddedData\",\n SurveyID=SurveyID,\n 
ResponseID=ResponseID,\n ED=ED,\n **kwargs):\n return False\n return True", "def process_response(self, request, response):\n return response", "def process_response(self, request, response):\n return response", "def _request_ended(self, reply):\n\n if reply.attribute(QNetworkRequest.HttpStatusCodeAttribute):\n self.http_resources.append(HttpResource(reply, self.cache))", "def _on_data_added(self, msg):\n self._link_new_data()\n data_item = self._create_data_item(msg.data.label)\n self.state.data_items.append(data_item)", "def on_data(self, data):\n self.tweets.append(data)\n if len(self.tweets) >= self.batch_size:\n self.write_to_pubsub(self.tweets)\n self.tweets = []\n self.count += 1\n # if we've grabbed more than total_tweets tweets, exit the script.\n # If this script is being run in the context of a kubernetes\n # replicationController, the pod will be restarted fresh when\n # that happens.\n if self.count > self.total_tweets:\n return False\n if (self.count % 1000) == 0:\n print 'count is: %s at %s' % (self.count, datetime.datetime.now())\n return True" ]
[ "0.7027461", "0.66865385", "0.65378416", "0.6361653", "0.63519835", "0.6307863", "0.6246295", "0.6245001", "0.6158304", "0.61403364", "0.61214167", "0.61103153", "0.6078733", "0.6078601", "0.59536886", "0.59317225", "0.5920005", "0.5906738", "0.5872005", "0.5840075", "0.5838024", "0.58355594", "0.581066", "0.5810348", "0.58096904", "0.5802387", "0.57939386", "0.57917976", "0.57817364", "0.57672936", "0.5762848", "0.5760207", "0.57584494", "0.57560384", "0.57560384", "0.5752033", "0.57518256", "0.5750046", "0.5743138", "0.5740346", "0.57302153", "0.5700401", "0.5691591", "0.5684794", "0.5676934", "0.5672052", "0.5669892", "0.5665477", "0.56576663", "0.5652961", "0.5652961", "0.5652961", "0.5652961", "0.5639629", "0.5613942", "0.55906", "0.55803514", "0.5577955", "0.5570434", "0.556242", "0.55549705", "0.55499417", "0.55340374", "0.5522983", "0.5518652", "0.551832", "0.5505385", "0.5505159", "0.55051076", "0.54989505", "0.5490313", "0.54901314", "0.548761", "0.54860866", "0.54853636", "0.54853636", "0.5479082", "0.5462141", "0.54617333", "0.5459012", "0.5454452", "0.54441035", "0.5442855", "0.5442855", "0.542338", "0.5414946", "0.54115516", "0.5408894", "0.54039747", "0.54025924", "0.5399263", "0.5399124", "0.53973824", "0.53972673", "0.5397032", "0.53920865", "0.53920865", "0.53893304", "0.5374484", "0.53744423" ]
0.5979023
14
Going to a nonchunkadmin URL should be ok, and should also put the `_data_changed` parameter onto the URL.
def test_to_other_url(self): user = User(username='test', is_staff=True, is_superuser=True, is_active=True) user.set_password('test') user.full_clean() user.save() request = RequestFactory().get('/') response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/') admin_instance = get_modeladmin(Iframe) new_response = admin_instance.maybe_fix_redirection( request=request, response=response_302, obj=user) self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa self.assertEqual(302, new_response.status_code) self.assertEqual('/admin_mountpoint/?_data_changed=1', new_response['Location'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_returned_data_changed(self):\n request = RequestFactory().get('/')\n admin_instance = get_modeladmin(Iframe)\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302)\n # returned early because it was a redirect, but we updated the\n # querystring anyway\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'early')\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def test_user_page_change_page(self):\n url = reverse('admin:core_user_change', args=[self.user.id])\n # houw args workd\n # admin/core/usre/\n res = self.client.get(url)\n #checking response for 200 ok page works\n self.assertEqual(res.status_code, 200)", "def test_data_admin_page(self):\n self.login(self.data_admin.user.username)\n self._go_to_data_admin_page()\n self.check_page_title(self.data_admin_config.get('PAGE_TITLE'))\n self.check_page_contains_ids(self.data_admin_config.get('ADMIN_LINKS'))", "def test_page_change_view(self):\n user = self._create_user({\"username\":\"user3\",\"email\":\"de@cd.com\"})\n anotheruser = self._create_random_user(startname=\"another_user_\")\n testproject = self._create_comicsite_in_admin(user,\"user3project\") \n testpage1 = create_page_in_admin(testproject,\"testpage1\")\n testpage2 = create_page_in_admin(testproject,\"testpage2\") \n url = reverse(\"admin:comicmodels_page_change\",\n args=[testpage1.pk])\n\n self._test_url_can_be_viewed(user,url) \n self._test_url_can_be_viewed(self.root,url)\n #TODO: The permissions are not correct, https://github.com/comic/comic-django/issues/306\n #self._test_url_can_not_be_viewed(anotheruser,url)", "def post(self) :\n self.redirect('/admin')", "def test_user_change_page(self):\n # example url: /admin/cor/user/<userID>\n url = reverse('admin:core_user_change', args=[self.user.id])\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_user_change_page(self):\n\n # Get the admin url with the user id and send a GET request\n url = reverse('admin:core_user_change', args=[self.user.id])\n res = self.client.get(url)\n\n # Assertion\n self.assertEqual(res.status_code, 200)", "def test_user_change_page(self):\n url = reverse('admin:core_user_change', args=[self.user.id])\n # Works like: /admin/core/user/{id}\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def web_admin_required(handler):\n\n def check_admin(self, *args, **kwargs):\n \"\"\"\n If handler has no login_url specified invoke a 403 error\n \"\"\"\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)\n\n return check_admin", "def get(self):\n self.redirect('/admin')", "def get(self):\n self.redirect('/admin')", "def get(self) :\n setSessionMessageByRequest(self, \"Invalid Request\", True)\n self.redirect('/admin')", "def test_not_logged_cannot_update(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def response_post_save_change(self, request, 
obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def http_method_not_allowed(self, request, *args, **kwargs):\n # Instead of just returning the standard \"method not allowed\" HTTP\n # status code, we can forward to the moderation admin\n return redirect(reverse('mod_admin'))", "def test_not_logged_cannot_update_tab(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def test_user_page_change(self):\n url = reverse('admin:core_user_change', args=[self.user.id])\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def checkForURL(self, data):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.URL_EVENT, data, self.hash)", "def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('public.home', next=request.url))", "def changelist_view(self, request, extra_context=None):\n return HttpResponseRedirect(reverse('admin:index'))", "def changelist_view(self, request, extra_context=None):\n return HttpResponseRedirect(reverse('admin:index'))", "def test_partial_update_should_not_be_allowed(self):\n response = self.client.patch(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def goToAdmin(request):\n\n\ttemplate = '../admin'\n\treturn HttpResponseRedirect(template)", "def test_url_data_present_in_url(self):\n url_data = {'anything': 'my username'}\n req = self.httpbin_3.test_requests_patch_method(url_data=url_data, dry_run=True)\n path = self.httpbin_3.client['homepage']['test_requests_patch_method']['path']\n self.assertEqual(urlparse(req.prepared_request.url).path, quote(path.format(**url_data)))", "def locking_admin_changelist_js_url(self):\n return reverse('admin:' + self.locking_admin_changelist_js_url_name)", "def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)", "def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('index'))", "def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('index'))", "def test_logentry_get_admin_url(self):\n logentry = LogEntry.objects.get(content_type__model__iexact=\"article\")\n expected_url = reverse(\n \"admin:admin_utils_article_change\", args=(quote(self.a1.pk),)\n )\n 
self.assertEqual(logentry.get_admin_url(), expected_url)\n self.assertIn(\"article/%d/change/\" % self.a1.pk, logentry.get_admin_url())\n\n logentry.content_type.model = \"nonexistent\"\n self.assertIsNone(logentry.get_admin_url())", "def index(request):\r\n badRequest(\"Url not found\")", "def GET_adminoff(self):\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = False)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def test_file_upload_page_shows(self):\n url = reverse(\"comicmodels.views.upload_handler\",\n kwargs={\"site_short_name\":self.testproject.short_name})\n self._test_url_can_be_viewed(self.root,url) \n #self._test_url_can_be_viewed(self.root.username,url)", "def get_success_url(self, request):\n return request.POST.get('next', reverse('mod_admin'))", "def test_modify_access_noparams(self):\r\n url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 400)", "def view(self, url):\r\n abort(404)", "def check_admin(self, *args, **kwargs):\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)", "def test_get_tab_no_admin(self):\n actions.login(self.NON_ADMIN_EMAIL, is_admin=False)\n response = self.get(self.TAB_URL, expect_errors=True)\n self.assertEquals(302, response.status_int)", "def index():\n redirect(URL('moderation','new_entries'))\n return dict()", "def test_user_change_page(self):\n url = reverse(\"admin:core_user_change\", args=[self.user.id])\n res = self.client.get(url)\n\n self.assertContains(res.status_code, 200)", "def test_an_admin_view_anonymous(client):\n response = client.get('/admin/')\n assert status(response) == 'found'\n assert response.url.startswith('/admin/login/')", "def urlfor( request, *args, **kwargs ):", "def admin():\n return redirect(url_for(\"user\", name=\"Admin!\"))", "def admin_update_preview():\n return user_management_handler(\"show_admin\", \"\", False)", "def admin_only():\n return 'Super-seekrit admin page.'", "def get_admin_url_to_add_run(self, request):\n base_url = reverse(\"admin:courses_courserun_add\")\n return f\"{base_url:s}?direct_course={self.id:d}\"", "def test_post_partial_update_admin(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title Patched'\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n self.client.force_authenticate(user=self.superuser)\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(title, response.content)\n self.assertIn(user_url, response.content)", "def test_no_question_for_admin(self):\n set_up_super_user(self)\n self.assertTrue(self.user.is_superuser)\n\n url = reverse('polls:detail', args=(1,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)", "def view(self, url):\n abort(404)", "def test_only_edit_perm(self):\n self.assertStatusCode(self.url, 403)", "def show_admin():\n 
return render_admin_page(\"admin.html\")", "def process_IN_ACCESS(self, event):", "def admin(request):\n if not request.user.is_staff:\n return render(request, 'manager/denied.html')\n return render(request, 'manager/index.html')", "def admin():\n pass # pragma: no cover", "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n \n if queried_entity and queried_entity.phb_user_admin_status == 'admin-1':\n return handler(self, *args, **kwargs)\n else:\n self.redirect('/', abort = True)\n \n return admin_login", "def test_request_invalid_page(admin_client, public_resource_with_metadata):\n djangoresponse = admin_client.get('/discoverapi/?pnum=-20', follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n resources = response['resources']\n assert len(json.loads(resources)) == 0\n assert djangoresponse.status_code == 200", "def test_url_tranform(self):\r\n response = self.client.get('/courses/edx/math101/2014/wiki/math101/')\r\n self.assertIn('/courses/edx/math101/2014/wiki/math101/_edit/', response.content)\r\n self.assertIn('/courses/edx/math101/2014/wiki/math101/_settings/', response.content)", "def edit_url(self, inplace=False,show_url = 0, **kwargs):\n\n if len(kwargs) > 0 :\n other_args = [\"{}={}\".format(k,str(v).replace(\" \",\"+\")) for k,v in kwargs.items()]\n new_url = self.url + \"&\" + \"&\".join(other_args)\n if show_url: print(new_url) \n\n if \"maxresults\" not in kwargs : \n print(\"Be careful : This request will only display the first 100 results.\")\n\n if inplace:\n self.url = new_url", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_linked_ajax(self):\r\n\r\n # Setup the peer grading module with the proper linked location.\r\n peer_grading = self._create_peer_grading_with_linked_problem(self.coe_location)\r\n\r\n # If we specify a location, it will render the problem for that location.\r\n data = peer_grading.handle_ajax('problem', {'location': self.coe_location.to_deprecated_string()})\r\n self.assertTrue(json.loads(data)['success'])\r\n\r\n # If we don't specify a location, it should use the linked location.\r\n data = peer_grading.handle_ajax('problem', {})\r\n self.assertTrue(json.loads(data)['success'])", "def test_change_name_get_request(self):\r\n change_name_url = self.get_url()\r\n resp = self.client.get(change_name_url)\r\n self.assertEquals(resp.status_code, 405)", "def test_modify_access_bad_action(self):\r\n url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {\r\n 'unique_student_identifier': self.other_staff.email,\r\n 'rolename': 'staff',\r\n 'action': 'robot-not-an-action',\r\n })\r\n self.assertEqual(response.status_code, 400)", "def test_get_update_blog_post_anonymous_user(self):\n\n test_blog = Post.objects.get(title=\"test1\")\n url = reverse('blogs:updated', kwargs={'slug': test_blog.slug})\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 403)", "def test_staff_access(self):\r\n out = self.c.get(self.url)\r\n print out\r\n self.assertTrue('Hints Awaiting Moderation' in out.content)", "def response_post_save_add(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return 
HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def test_public_status_page_patch_public_status_page(self):\n pass", "def scan_admin_url():\r\n target_admin_url=provided_url+\"/administrator/index.php\"\r\n if verbose_flag: print \"\\t[.] Trying to access admin login page...\", #+ target_admin_url\r\n try:\r\n response = urllib2.urlopen(target_admin_url)\r\n except HTTPError, e:\r\n admin_flag=0\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Failed\"\r\n return admin_flag\r\n else:\r\n admin_flag=1\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Success\"\r\n return admin_flag", "def test_post_partial_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n title = 'Random New Title'\n response = self.client.patch(url, {'title': title}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_detail_blocked_forbidden_even_if_contributor(self):\n self.login(self.user1)\n resp = self.client.get(self.get_url(self.c3.pk))\n self.assert404(resp)", "def test_user_not_logged_in_redirects_from_change_email(self):\n get_response = self.get_change_email()\n post_response = self.post_change_email()\n self.assertRedirects(get_response, self.login_url)\n self.assertRedirects(post_response, self.login_url)", "def index(request):\n return utility.respond(request, 'admin/index')", "def test_url_is_accessible_to_externals(self):\n\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 200)\n\n self.user.is_external = False\n self.user.save()\n res = self.client.get(self.url)\n self.assertEqual(res.status_code, 404)", "def test_01_admin_index_anonymous(self):\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n err_msg = (\"The user should not be able to access this page\"\r\n \" but the returned status is %s\" % res.data)\r\n assert \"Please sign in to access this page\" in res.data, err_msg", "def test_admin_url(self):\n url = reverse('admin:index')\n\n self.assertEqual(url, '/admin/')", "def test_view_url_accessible_by_name(self):\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)", "def test_no_permission(client):\n user = user_with_permissions()\n\n url = reverse(\"admin:index\")\n client.force_login(user)\n\n response = client.get(url)\n assert parse_sidemenu(response) == {\"Global\": [\"/en/admin/\"]}", "async def admin_server_url(self, ctx: commands.Context, *url: str):\n the_url = await self.config.server_url()\n url = ' '.join(url)\n if not url:\n await ctx.author.send(f'Team management server url: {the_url}')\n else:\n await self.config.server_url.set(url)\n message = [display(ctx.author),\n f'set the team management server url to {url}.']\n if the_url:\n message.append(f'(was `{the_url}`)')\n await self.admin_msg(' '.join(message))", "def test_an_admin_view(admin_client):\n response = admin_client.get('/admin/')\n assert status(response) == 'ok'", "def url(self):\n ...", "def test_read_not_admin2(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n 
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def cant_view_event(self, event, request):\n if request.user.is_authenticated():\n return redirect('main:permission_denied', event.slug)\n else:\n desired_url = reverse('main:event', args=(event.slug,))\n url = reverse('main:login')\n return redirect('%s?next=%s' % (url, urllib.quote(desired_url)))", "def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'", "def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_missing_params(self):\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url)\r\n self.assertEqual(response.status_code, 400)", "def test_public_status_page_post_public_status_page(self):\n pass", "def test_instructor_page_access_nonstaff(self):\r\n self.login(self.enrolled_user)\r\n\r\n urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),\r\n reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]\r\n\r\n # Shouldn't be able to get to the instructor pages\r\n for url in urls:\r\n check_for_get_code(self, 404, url)", "def test_bad_action(self):\r\n action = 'robot-not-an-action'\r\n url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n response = self.client.get(url, {'identifiers': self.beta_tester.email, 'action': action})\r\n self.assertEqual(response.status_code, 400)", "def test_update_view_redirect_anauth_users_to_login_page(self):\n response = self.client.get('/groups/new_group/edit/')\n self.assertEqual(response.status_code, 302)\n self.assertIn(\"login\", response.url)", "def test_can_access_admin(self):\n\n #Homepage\n self.browser.get(self.live_server_url + '/admin/')\n\n body = self.browser.find_element_by_tag_name('body')\n\n self.assertIn('Django administration',body.text,\"Cannot get to /admin/\")", "def test_the_data_edit_url(self):\n\n my_instance = Contact.objects.first()\n info_url = resolve('/to_form/%s/' % my_instance.id)\n self.assertEqual(info_url.func.__name__, 'my_edit_data')\n self.assertEqual(self.response.status_code, 200)", "def admin(request):\n if not request.user.is_staff:\n return render_to_response('error.htm', {\n 'error': \"Sorry, you are not staff... 
(user permissions 'is_staff')\",\n })\n return render_to_response('admin.htm', {\n 'username': request.user,\n })", "def web_index():\n\n try:\n auth_check()\n except Exception as e:\n return flask.redirect(str(e))\n\n db_update_archives()\n\n return flask.redirect('videos')", "def adminlogin(request):\n request.GET = request.GET.copy()\n request.GET['redirect'] = '/annaleut/admin/'\n return auth_views.login(request, template_name='admin/login.html',\n redirect_field_name='redirect')", "def idx(_request):\n return HttpResponseRedirect('/home')", "def show_admin_edit_users():\n return render_admin_page(\"admin-eu.html\")", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def stock(request):\n if not request.user.is_staff:\n return NO_PERMISSION\n return {}", "def __before__(self):\n \n if not u'REMOTE_USER' in session: \n if not request.environ[u'PATH_INFO'] in self.public_urls:\n log.debug('PATH_INFO: %s' % request.environ[u'PATH_INFO'])\n #session[u'path_before_login'] = request.environ[u'PATH_INFO']\n #session.save()\n redirect(url('/users/index'))", "def test_fac_admin_page(self):\n self.login(self.fac_admin.user.username)\n self._got_to_fac_admin_page()\n self.check_page_title(self.admin_config.get('FAC_ADMIN').get('PAGE_TITLE'))\n self.check_page_contains_ids(self.admin_config.get('FAC_ADMIN').get('ADMIN_LINKS'))" ]
[ "0.6304963", "0.60231835", "0.5975458", "0.59199995", "0.57574075", "0.5661228", "0.56567496", "0.5646688", "0.56410176", "0.5619408", "0.5619408", "0.5565735", "0.5562113", "0.5547753", "0.5510401", "0.55066466", "0.54986465", "0.54867476", "0.54704493", "0.54627734", "0.5448876", "0.5448876", "0.5429229", "0.5409688", "0.5406041", "0.53909856", "0.5355159", "0.5354408", "0.5354408", "0.53482854", "0.53280336", "0.5327791", "0.5324801", "0.5303572", "0.52971935", "0.5292696", "0.52901006", "0.5286303", "0.5284699", "0.5277642", "0.5271449", "0.52617306", "0.5258354", "0.5226321", "0.52199644", "0.5212393", "0.5181351", "0.5167852", "0.51650447", "0.5162401", "0.5141304", "0.5136682", "0.51361144", "0.51333624", "0.510815", "0.5107039", "0.5101043", "0.5097633", "0.50925016", "0.50900924", "0.50874007", "0.50811", "0.5072602", "0.5061369", "0.50563", "0.5052521", "0.5044931", "0.5038134", "0.50366724", "0.50346", "0.5032354", "0.5030343", "0.502868", "0.5025092", "0.50198877", "0.5016596", "0.5011911", "0.5007083", "0.49946946", "0.49870622", "0.49702626", "0.49675676", "0.49659696", "0.4965634", "0.4951838", "0.4951013", "0.49305305", "0.49268332", "0.4924333", "0.49165335", "0.49116725", "0.49086794", "0.49061334", "0.49030387", "0.49022734", "0.4901839", "0.4901004", "0.48998198", "0.48994204", "0.489909" ]
0.6291411
1
If `_autoclose` is in the URL, that + `_data_changed` should propagate to the next redirect URL for the purposes of our adminlinks JS.
def test_autoclose_chunkadmin(self): user = User(username='test', is_staff=True, is_superuser=True, is_active=True) user.set_password('test') user.full_clean() user.save() admin_instance = get_modeladmin(Iframe) self.assertIsInstance(admin_instance, RealishAdmin) request = RequestFactory().get('/', { '_autoclose': 1, }) request.user = user iframe_admin = reverse('admin:embeds_iframe_add') response_301 = HttpResponsePermanentRedirect(redirect_to=iframe_admin) ct = get_content_type(User) iframe = Iframe(position=2, region='test', content_type=ct, content_id=user.pk, url='https://news.bbc.co.uk/') iframe.full_clean() iframe.save() new_response = admin_instance.maybe_fix_redirection( request=request, response=response_301, obj=iframe) self.assertEqual(new_response['X-Chunkadmin-Response'], 'autoclose') self.assertEqual(301, new_response.status_code) location, querystring = new_response['Location'].split('?') self.assertEqual('/admin_mountpoint/embeds/iframe/add/', location) self.assertIn('region=test', querystring) self.assertIn('_data_changed=1', querystring) self.assertIn('_autoclose=1', querystring) self.assertIn('content_type={0}'.format(ct.pk), querystring) self.assertIn('content_id={0}'.format(iframe.pk), querystring)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def response_post_save_change(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)", "def test_returned_data_changed(self):\n request = RequestFactory().get('/')\n admin_instance = get_modeladmin(Iframe)\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302)\n # returned early because it was a redirect, but we updated the\n # querystring anyway\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'early')\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def response_post_save_add(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def redirect_on_exit_url(self, redirect_on_exit_url):\n\n self._redirect_on_exit_url = redirect_on_exit_url", "def response_add(self, request, obj):\r\n\r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n\r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n\r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_add(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(xt_id_mb__gt=obj.xt_id_mb).order_by('xt_id_mb')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mbAdmin, self).response_change(request, obj)", 
"def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_lab__gt=obj.id_xt_lab).order_by('id_xt_lab')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(xtlabAdmin, self).response_change(request, obj)", "def response_post_save_change(self, request, obj):\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n return HttpResponseRedirect(url)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_mcce__gt=obj.id_xt_mcce).order_by('id_xt_mcce')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mcceAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_pc__gt=obj.id_xt_pc).order_by('id_xt_pc')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(pcAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_sust__gt=obj.id_xt_sust).order_by('id_xt_sust')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(xt_sustanciasAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_pcce__gt=obj.id_xt_pcce).order_by('id_xt_pcce')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(pcceAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_mc__gt=obj.id_xt_mc).order_by('id_xt_mc')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mcAdmin, self).response_change(request, obj)", "def redirect(url):", "def on_before_close(self):\n pass", "def redirect_to_original_url(query_short_url):\n db_url = Url.query.filter_by(short_url=query_short_url).first_or_404()\n db_url.views += 1\n db.session.commit()\n return redirect(db_url.original_url)", "def url_event_listener():\n track_template = \"<a href=\\\"{0}\\\" target=\\\"_blank\\\" onclick=\\\"trackOutboundLink('{0}'); return false;\\\"\"\n if request.method == 'POST':\n urls 
= request.form['url_textbox']\n track_urls = [track_template.format(url.strip()) for url in urls.split('\\n')]\n return render_template('link_tracking.html', links=track_urls)\n return render_template('link_tracking.html', links=[])", "def process_IN_CLOSE_WRITE(s, event):\n s.doReload(event)", "def link_new_callback(self):\n pass", "def after_link_issue(self, external_issue, **kwargs):\n pass", "def test_to_other_url(self):\n user = User(username='test', is_staff=True, is_superuser=True,\n is_active=True)\n user.set_password('test')\n user.full_clean()\n user.save()\n request = RequestFactory().get('/')\n response_302 = HttpResponseRedirect(redirect_to='/admin_mountpoint/')\n admin_instance = get_modeladmin(Iframe)\n new_response = admin_instance.maybe_fix_redirection(\n request=request, response=response_302, obj=user)\n self.assertEqual(new_response['X-Chunkadmin-Response'], 'not-chunkadmin') # noqa\n self.assertEqual(302, new_response.status_code)\n self.assertEqual('/admin_mountpoint/?_data_changed=1',\n new_response['Location'])", "def response_post_save_change(self, request, obj):\n\n # Default response\n resp = super(StoryAdmin, self).response_post_save_change(request, obj)\n\n # Check that you clicked the button `_save_and_copy`\n if '_accept_story' in request.POST:\n # Accept the Story, and get the copy new CuratedStory\n created, problems = self.accept_story(obj)\n if created != 1:\n msg = \"Could not accept Story, one already exists for %s\"\n message = msg % obj.person.name\n self.message_user(request, message, level=messages.ERROR)\n return resp\n\n new_obj = CuratedStory.objects.get(story=obj)\n\n # Get its admin url\n opts = CuratedStory._meta\n info = opts.app_label, opts.model_name\n route = 'admin:{}_{}_change'.format(*info)\n post_url = reverse(route, args=(new_obj.pk,))\n\n # Inform the user they are now editting the CuratedStory\n self.message_user(request, \"Now editting the Curated Story\")\n\n # And redirect to it\n return HttpResponseRedirect(post_url)\n elif '_reject_story' in request.POST:\n # Reject the stories, and return the default response\n queryset = Story.objects.filter(pk=obj.pk)\n self.reject_stories(queryset)\n return resp\n else:\n # Otherwise, just use default behavior\n return resp", "def cog_unload(self):\n self.resend_post.cancel()", "def redirect_view(request, short_url):\n try:\n if request.method == 'GET':\n shortener = ShortenedURL.objects.get(short_url=short_url)\n shortener.times_visited += 1\n shortener.save()\n return HttpResponseRedirect(shortener.long_url)\n except ShortenedURL.DoesNotExist:\n return HttpResponse(status=404)", "def on_connection_closed(self):", "def after_request(self, response):\n # only track data for specified blueprints\n if self.blueprints:\n if request.blueprint not in self.blueprints:\n return response\n\n t_0 = getattr(g, 'start_time', dt.datetime.now())\n\n visit = dict(\n session_id=session.get('UUID', 0),\n timestamp=timestamp(),\n url=request.url,\n view_args=request.view_args,\n status_code=response.status_code,\n path=request.path,\n latency=(dt.datetime.now() - t_0).microseconds / 100000,\n content_length=response.content_length,\n referer=request.referrer,\n values=request.values\n )\n self.store_visit(visit)\n self.update_top_list(request.path)\n return response", "def change_abandoned(self, event):\n pass", "def html_redirect(self):\n soup = BeautifulSoup(self.contents, \"lxml\")\n meta = soup.find('meta', **{'http-equiv': 'refresh'})\n assert meta is not None, 'No <meta http-equiv=\"refresh\" /> tag 
found.'\n url = meta.get('content').partition(';url=')[2]\n self.open(url)", "def _after_serve_actions(self):\n pass", "def checkForURL(self, data):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.URL_EVENT, data, self.hash)", "def _after_open(self, *args):\n\n if not self.is_artella_path():\n return\n\n self.validate_environment_for_callback('AfterOpen')", "def get_success_url(self, request):\n return request.POST.get('next', reverse('mod_admin'))", "def link_edit_callback(self):\n pass", "def handle_close(self):\r\n self._connection_state = STATE_DISCONNECTED\r\n super(http_evented, self).handle_close()\r\n self._fail_all_pending_event_handlers()\r\n call_if_not_none_and_callable(self._onClose)", "def redirect(self, url):\n raise RequestRedirect(url)", "def link_redirect(request, shortened_url: str):\n try:\n url = Url.objects.get(short_url=shortened_url)\n long_url = url.long_url\n return HttpResponseRedirect(long_url)\n except Url.DoesNotExist or TypeError:\n return HttpResponseBadRequest(\"Wrong url\")", "def overwrite_url_allowed(self, overwrite_url_allowed):\n\n self._overwrite_url_allowed = overwrite_url_allowed", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def add_redirect(self, url1, url2):\n self._load_redirects()\n if (url1 != url2):\n self.redirects[url1] = url2\n redirectsfile = osp.join(self.basepath, 'redirects.csv')\n f = open(redirectsfile, 'a')\n csvout = csv.writer(f, delimiter=',', quotechar='\"')\n csvout.writerow([url1.encode('UTF-8'), url2.encode('UTF-8')])\n f.close()", "def eastgardens(event, context):\n\n request = event['Records'][0]['cf']['request']\n path = request['uri']\n query = request['querystring']\n\n # prepend a ? if there is a query\n if query != '':\n query = '?' + query\n\n # Path+query based custom redirects get checked first\n if path + query in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path + query])\n\n # Now check path only custom redirects\n if path in variables.REDIRECTS:\n return redirect(variables.REDIRECTS[path])\n\n return handle_fallthrough(event, path, query)", "def perform_post_request_actions(self):\n self._close_old_django_connections()", "def close(request, post, comment, **kwargs):\n user = request.user\n Post.objects.filter(uid=post.uid).update(status=Post.CLOSED)\n # Generate a rationale post on why this post is closed.\n context = dict(comment=comment)\n rationale = mod_rationale(post=post, user=user,\n template=\"messages/closed.md\",\n extra_context=context)\n msg = f\"Closed {post_link(post)}. 
\"\n url = rationale.get_absolute_url()\n messages.info(request, mark_safe(msg))\n db_logger(user=user, text=f\"{msg} ; post.uid={post.uid}.\")\n return url", "def onClose (self):\n \n pass", "def spider_closed(self, spider):\n lenOfdeadUrls = len(self.deadurldict['urls'])\n logging.info('spidername ' + self.name + '!!!')\n logging.info('visitedurls' + str(len(self.visitedurldict['urls'])))\n logging.info('datadict ' + str(len(self.datadict['datas'])))\n logging.info('filedict ' + str(len(self.filedict['files'])))\n logging.info('deadurls ' + str(len(self.deadurldict['urls'])))\n\n if (lenOfdeadUrls==10):\n unirest.timeout(180)\n resdeadurl = unirest.put(\n \"http://192.168.100.3:5000/deadurls\",\n headers={ \"Accept\": \"application/json\", \"Content-Type\": \"application/json\" },\n params=json.dumps(self.deadurldict)\n )\n\n elif(lenOfdeadUrls==0):\n unirest.timeout(180)\n resvisitedurl = unirest.put(\n \"http://192.168.100.3:5000/visitedurls\",\n headers={ \"Accept\": \"application/json\", \"Content-Type\": \"application/json\" },\n params=json.dumps(self.visitedurldict)\n )\n unirest.timeout(180)\n resdata = unirest.put(\n \"http://192.168.100.3:5000/data\",\n headers={ \"Accept\": \"application/json\", \"Content-Type\": \"application/json\" },\n params=json.dumps(self.datadict)\n )\n unirest.timeout(180)\n resfile = unirest.put(\n \"http://192.168.100.3:5000/file\",\n headers={ \"Accept\": \"application/json\", \"Content-Type\": \"application/json\" },\n params=json.dumps(self.filedict)\n )\n\n else:# lenOfdeadUrls in (0,10)\n unirest.timeout(180)\n resvisitedurl = unirest.put(\n \"http://192.168.100.3:5000/visitedurls\",\n headers={ \"Accept\": \"application/json\", \"Content-Type\": \"application/json\" },\n params=json.dumps(self.visitedurldict)\n )\n unirest.timeout(180)\n resdata = unirest.put(\n \"http://192.168.100.3:5000/data\",\n headers={ \"Accept\": \"application/json\", \"Content-Type\": \"application/json\" },\n params=json.dumps(self.datadict)\n )\n unirest.timeout(180)\n resfile = unirest.put(\n \"http://192.168.100.3:5000/file\",\n headers={ \"Accept\": \"application/json\", \"Content-Type\": \"application/json\" },\n params=json.dumps(self.filedict)\n )\n unirest.timeout(180)\n resdeadurl = unirest.put(\n \"http://192.168.100.3:5000/deadurls\",\n headers={ \"Accept\": \"application/json\", \"Content-Type\": \"application/json\" },\n params=json.dumps(self.deadurldict)\n )", "def redirect_url(self, redirect_url):\n\n self._redirect_url = redirect_url", "def redirect(self):\n new_url = self.server.url + options.script_alias + '/'\n self.send_response(301, \"Moved (redirection follows)\")\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Location\", new_url)\n self.end_headers()\n self.wfile.write(\"\"\"<html>\n<head>\n<meta http-equiv=\"refresh\" content=\"1; URL=%s\">\n</head>\n<body>\n<h1>Redirection to <a href=\"%s\">ViewVC</a></h1>\nWait a second. You will be automatically redirected to <b>ViewVC</b>.\nIf this doesn't work, please click on the link above.\n</body>\n</html>\n\"\"\" % tuple([new_url]*2))", "def response_change(self, request, obj):\n opts = obj._meta\n\n msg = 'The menu item \"%s\" was changed successfully.' 
% force_unicode(obj)\n\n if \"_continue\" in request.POST:\n self.message_user(request, msg + ' ' + \"You may edit it again below.\")\n return HttpResponseRedirect(request.path)\n\n elif \"_addanother\" in request.POST:\n self.message_user(request, msg + ' ' + (\"You may add another %s below.\" % force_unicode(opts.verbose_name)))\n return HttpResponseRedirect(obj.menu_item.menu.get_add_page_url())\n\n else:\n self.message_user(request, msg)\n return HttpResponseRedirect(obj.menu_item.menu.get_edit_url())", "def unshorten_redirect(self, hashed):\n link_data = self.get_link_data(hashed)\n if link_data is None:\n abort(404, 'Shortened URL not found')\n else:\n self.link_db[hashed]['lookups'] += 1\n\n full_link = link_data['full_link']\n\n redirect(full_link)\n self.link_db.sync()", "def on_deactivate(self) -> None:", "def on_page_changing(e):\n\n e.Skip()", "def spider_closed(spider):\n spider.crawler.stats.set_value('failed_urls', ','.join(spider.failed_urls))", "def is_redirect(response: aiohttp.ClientResponse) -> bool:\n return response.status in (300, 301, 302, 303, 307)", "def handleClose(self):\n logging.info(\"%s %s\", self.address, \"closed\")\n self.logbook.clients_disconnected_count += 1", "def anchor_browser_callback(self, client_data):\n pass", "def process_IN_OPEN(self, event):", "def source():\n return redirect(get_last_menus_url())", "def old_post_save(model, os_path, contents_manager):\n os_path.append(\"old_post_save\")", "def after_successful_edit(self):\n pass", "def onClose(self, wasClean, code, reason):", "def onfinish( request ):", "def onfinish( request ):", "def setLinkAlive(self, url):\n if url in self.historyDict:\n #test output\n #pywikibot.output('[%s] setLinkAlive: SEM acquire [%s]' % (datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),url))\n self.semaphore.acquire()\n try:\n del self.historyDict[url]\n except KeyError:\n # Not sure why this can happen, but I guess we can ignore this.\n pass\n #test output\n #pywikibot.output('[%s] setLinkAlive: SEM release [%s]' % (datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),url))\n self.semaphore.release()\n return True\n else:\n return False", "def post(self) :\n self.redirect('/admin')", "def _on_close(self):\n self.shell_obj.closed()", "def redirect(self, url):\n # todo: use Flask's redirect support\n seen_urls = {url}\n from_url = url\n while True:\n to_url = self.get(from_url)\n if to_url is None:\n break\n if to_url in seen_urls:\n raise RedirectException('Saw redirect loop with key {0}'.format(url))\n from_url = to_url\n return from_url", "def _handle_popup_close(self):\n self._refresh()", "def record_post_forward(self, handles: List[FlatParamHandle]) -> None:\n if not handles:\n return\n handles_key = tuple(handles)\n # Only record the first usage of a handles key\n if handles_key in self.handles_to_post_forward_order_index:\n return\n index = len(self.handles_post_forward_order)\n self.handles_to_post_forward_order_index[handles_key] = index\n self.handles_post_forward_order.append(handles_key)", "def _wrapped_handler_ref_changed(self, wrapped_handler_ref):\n if self.next is not None:\n self.next.wrapped_handler_ref = wrapped_handler_ref", "def pre_close(self, cr, uid, ids, context={}):\n self.write(cr, uid, ids, {'state': 'preclose'}, context=context)\n return True", "def process_IN_CLOSE_WRITE(self, event):\n self.git.post_change(event.pathname, commit_msg=\"dotfile_tracker update: \"+event.pathname)", "def on_unload(self):\n pass", "def on_closing(self, *args):\n pass", "def 
middleware_after(self):\n pass", "def add_new_url(self, url):\n if url is None:\n return \n if url not in self.new_urls and url not in self.old_urls:\n self.new_urls.add(url)", "def _on_response(self):\n request = self._requests.pop(0)\n try:\n request[-1].cancel()\n left = request[-1].end - Engine.instance().time\n except Exception:\n left = request[5]\n pass\n\n response = self.current_response\n\n close_after = response.headers.get('Connection', '') == 'close'\n close_after &= self.keep_alive\n\n # Is this a 100 Continue?\n if response.status == 100:\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Did we catch a redirect?\n if response.status in (301,302) and request[9] <= self.max_redirects:\n # Generate a new request, using the new URL.\n new_url = urlparse.urljoin(response.full_url,\n response.headers['Location'])\n\n new_headers = request[3].copy()\n del new_headers['Host']\n\n new_req = self._add_request(request[0], new_url, new_headers,\n request[4], left, False)\n new_req[6] = request[6]\n new_req[7] = request[7]\n new_req[9] = request[9] + 1\n\n new_req.append(\n Engine.instance().defer(left, self._request_timeout, new_req))\n\n self._requests.insert(0, new_req)\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Try converting to unicode?\n if self.unicode:\n content_type = response.headers.get('Content-Type','')\n if 'charset=' in content_type:\n content_type, _, encoding = content_type.partition('charset=')\n try:\n response.body = response.body.decode(encoding)\n except (LookupError, UnicodeDecodeError):\n pass\n\n # Determine the handler function to use.\n if callable(request[6]):\n func = request[6]\n else:\n func = self.on_response\n\n # Call the handler function.\n try:\n func(0, response)\n except Exception:\n log.exception('Error in HTTP response handler.')\n\n # Process the next request.\n self.current_response = None\n\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()", "def close_as_lost(case):\n # we should never be closing something as lost before the ltfu_date\n assert(case.ltfu_date <= datetime.utcnow().date())\n case.manual_close(const.Outcome.LOST_TO_FOLLOW_UP, \n datetime.combine(case.ltfu_date, time()))", "def __get_redirect_url(self):\n if self.get_submit_save_and_continue_edititing_button_name() not in self.request.POST:\n return self.request.cradmin_app.reverse_appindexurl()\n return self.request.cradmin_app.reverse_appurl(\n 'groupcomment-edit',\n args=self.args,\n kwargs=self.kwargs)", "def on_deactivate(self):", "def post_save_page(instance, raw, created, **kwargs):\n old_page = instance.old_page\n del(instance.old_page)\n \n if settings.CMS_MODERATOR:\n # tell moderator something was happen with this page\n from cms.utils.moderator import page_changed\n page_changed(instance, old_page)", "def slot_history_changed(self, _sender, _data):\r\n last_candle = self.history.last_candle()\r\n if last_candle:\r\n self.client.history_last_candle = last_candle.tim", "def post_add_link(self):\n course = courses.Course(self)\n link = course.add_link()\n link.href = ''\n course.save()\n self.redirect(self.get_action_url(\n 'edit_link', key=link.unit_id, extra_args={'is_newly_created': 1}))", "def locking_admin_changelist_js_url(self):\n return 
reverse('admin:' + self.locking_admin_changelist_js_url_name)", "def get_success_url(self):\n return self.request.GET.get(\n 'next',\n super().get_success_url()\n )", "def gonext():\n next = cherrypy.request.params.get('next', '')\n if next != '':\n redirect(next)", "def get(self, request, slug=None):\n if (slug):\n email_link = EmailLinks.objects.get(link_tracking_id=slug)\n email_link.clicked_time = datetime.now()\n email_link.clicked_status = True\n email_link.save()\n redirect_url = email_link.link_url\n return HttpResponseRedirect(redirect_to=redirect_url)", "def changelist_view(self, request, extra_context=None):\n return HttpResponseRedirect(reverse('admin:index'))", "def changelist_view(self, request, extra_context=None):\n return HttpResponseRedirect(reverse('admin:index'))", "def _handler_direct_access_exit(self, *args, **kwargs):", "def _on_close(self):\n self.web_socket_open = False\n self.logged_in = False\n print(\"WebSocket Closed for \" + self.session_name)\n\n if not self.disconnected_by_user:\n print(\"Reconnect to the endpoint for \" + self.session_name + \" after 3 seconds... \")\n time.sleep(3)\n self.connect()", "def default_after_end_session_hook(\n request, id_token=None, post_logout_redirect_uri=None,\n state=None, client=None, next_page=None):\n return None", "def update_url(self):\n url = urljoin(getattr(self.parent, 'url', '') + '/', self.slug)\n if url != self.url:\n self.url = url\n self.save()\n\n for child in self.get_children():\n child.update_url()", "def prehistory_recept(self, userdialog):\n\n return False, None", "def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('public.home', next=request.url))", "def end_oauth_view(request):\n auth_code = request.GET[\"code\"]\n save_token(auth_code)\n url = reverse(\"admin:actionstep_accesstoken_changelist\")\n return redirect(url)", "def set_close_callback( callback ):", "def nextURL(self):\n return self.wizard.nextURL", "def process_IN_MOVED_TO(self, event):" ]
[ "0.5717953", "0.56154025", "0.5552884", "0.5355236", "0.5205197", "0.5178103", "0.49210623", "0.48757377", "0.48716828", "0.48654342", "0.48438132", "0.48274982", "0.48240012", "0.48213187", "0.47654843", "0.47389874", "0.46854186", "0.46743634", "0.46646327", "0.463678", "0.46236932", "0.46202537", "0.4605772", "0.45826113", "0.45722023", "0.45464882", "0.45457536", "0.4532105", "0.4526008", "0.45164093", "0.4495334", "0.44951114", "0.44858262", "0.44806314", "0.44732645", "0.4443929", "0.4437077", "0.4434942", "0.44289282", "0.44289282", "0.44289282", "0.4426092", "0.44163463", "0.43936604", "0.43936598", "0.4378165", "0.43775487", "0.43688762", "0.43657896", "0.43646604", "0.43537393", "0.4351902", "0.43513495", "0.43474534", "0.43404233", "0.4336261", "0.4335071", "0.4328975", "0.43209073", "0.43180296", "0.43154237", "0.4313805", "0.43110684", "0.43110684", "0.4289381", "0.42871326", "0.42852598", "0.42839518", "0.42664275", "0.42642155", "0.42592025", "0.4256562", "0.42545062", "0.42516193", "0.424858", "0.42485037", "0.42406368", "0.42398235", "0.42395836", "0.42393982", "0.42306238", "0.42286482", "0.42242438", "0.4223725", "0.4222223", "0.42199534", "0.42167535", "0.4213841", "0.42015296", "0.42015296", "0.41979954", "0.4192164", "0.4191293", "0.41905013", "0.41903868", "0.41895428", "0.41861", "0.41822344", "0.4176404", "0.41695437" ]
0.5659059
1
if continue editing is hit, it should go back to the parent URL, I think?
def test_continue_editing_parent_object(self):
    user = User(username='test', is_staff=True, is_superuser=True,
                is_active=True)
    user.set_password('test')
    user.full_clean()
    user.save()
    admin_instance = get_modeladmin(Iframe)
    self.assertIsInstance(admin_instance, RealishAdmin)
    request = RequestFactory().get('/', {
        '_continue': 1,
    })
    request.user = user
    iframe_admin = reverse('admin:embeds_iframe_add')
    response_301 = HttpResponsePermanentRedirect(redirect_to=iframe_admin)
    ct = get_content_type(User)
    iframe = Iframe(position=2, region='test', content_type=ct,
                    content_id=user.pk, url='https://news.bbc.co.uk/')
    iframe.full_clean()
    iframe.save()
    new_response = admin_instance.maybe_fix_redirection(
        request=request, response=response_301, obj=iframe)
    self.assertEqual(new_response['X-Chunkadmin-Response'],
                     'redirect-to-parent')
    self.assertEqual(301, new_response.status_code)
    self.assertEqual('/admin_mountpoint/auth/user/1/?_data_changed=1',
                     new_response['Location'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def submit_and_back(self):\n self.submit(skip_confirm=True)\n self.parent().do_detail()", "def submit_and_back(self):\n self.submit(skip_confirm=True)\n self.parent().do_detail()", "def response_change(self, request, obj):\r\n \r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_saveasnew\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n \r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n \r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_change(request, obj)", "def test_edit_view(self):\n target_url = url_for('content.edit_content')\n redirect_url = url_for('users.login', next=target_url)\n response = self.client.get(target_url)\n self.assertEqual(response.status_code, 302)\n self.assertRedirects(response, redirect_url)", "def __get_redirect_url(self):\n if self.get_submit_save_and_continue_edititing_button_name() not in self.request.POST:\n return self.request.cradmin_app.reverse_appindexurl()\n return self.request.cradmin_app.reverse_appurl(\n 'groupcomment-edit',\n args=self.args,\n kwargs=self.kwargs)", "def quit_form(self):\n self.parse_request()\n\n try:\n # Back to record list.\n # Parse list's url from the request path.\n pos = self.request.path.rfind(\"/\")\n if pos > 0:\n url = self.request.path[:pos] + \"/list.html\"\n if self.page:\n url += \"?_page=\" + str(self.page)\n return HttpResponseRedirect(url)\n except Exception, e:\n logger.log_tracemsg(\"Quit form error: %s\" % e)\n\n raise http.Http404", "def start_editing(self):\r\n if self._mode is None:\r\n self._mode = 'edit'\r\n params = {\r\n 'f' : 'json',\r\n 'sessionID' : self._guid\r\n }\r\n url = \"%s/startEditing\" % self._url\r\n res = self._con.post(url, params)\r\n return res['success']\r\n return False", "def editPage(request, title):\n entry = util.get_entry(title)\n if request.method == \"POST\":\n # check if the data is valid then save/replace old data\n form = editPageForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data[\"editTitle\"]\n content = form.cleaned_data[\"editBody\"]\n\n util.save_entry(title, content)\n\n # take user to their editted page\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": title\n }))\n # give user a editting form with existing data filled in by defult. 
\n else:\n editForm = editPageForm(initial={\n \"editTitle\": title,\n \"editBody\": entry\n })\n editFormTitle = editForm[\"editTitle\"]\n editFormBody = editForm[\"editBody\"]\n return render(request, \"encyclopedia/editPage.html\", {\n \"formTitle\": editFormTitle,\n \"formBody\": editFormBody\n })", "def edit(self):\n\n pass", "def response_post_save_change(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def after_successful_edit(self):\n pass", "def response_change(self, request, obj):\n opts = obj._meta\n\n msg = 'The menu item \"%s\" was changed successfully.' % force_unicode(obj)\n\n if \"_continue\" in request.POST:\n self.message_user(request, msg + ' ' + \"You may edit it again below.\")\n return HttpResponseRedirect(request.path)\n\n elif \"_addanother\" in request.POST:\n self.message_user(request, msg + ' ' + (\"You may add another %s below.\" % force_unicode(opts.verbose_name)))\n return HttpResponseRedirect(obj.menu_item.menu.get_add_page_url())\n\n else:\n self.message_user(request, msg)\n return HttpResponseRedirect(obj.menu_item.menu.get_edit_url())", "def edit_redirect_url(self):\n return url_for(self.edit_redirect_to_view)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_pcce__gt=obj.id_xt_pcce).order_by('id_xt_pcce')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(pcceAdmin, self).response_change(request, obj)", "def response_add(self, request, obj):\r\n\r\n # in these cases, the redirect is good\r\n if list(set(request.POST.keys()) & set([\"_addanother\", \"_continue\"])):\r\n return super(ServeeModelAdmin, self).response_change(request, obj)\r\n\r\n # we want to override the default save case in the frontend\r\n ref = request.META.get(\"HTTP_REFERER\")\r\n if ref and (ref.find(\"/servee/\") == -1):\r\n if request.is_ajax():\r\n return HttpResponse(\"<script type='text/javascript'>window.location.reload(true);</script>\")\r\n else:\r\n return HttpResponseRedirect(ref)\r\n\r\n # fallback to normal functionality\r\n return super(ServeeModelAdmin, self).response_add(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_lab__gt=obj.id_xt_lab).order_by('id_xt_lab')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(xtlabAdmin, self).response_change(request, obj)", "def change_view(self, request, object_id, extra_context=None):\n\n latest_draft = self.get_latest_draft(object_id)\n has_publish_perm = request.user.has_perm(\"easypublisher.can_approve_for_publication\")\n context = extra_context or {}\n\n if latest_draft:\n context['has_draft'] = latest_draft.pk\n \n if not 
context.get('current', False):\n \n if not has_publish_perm: \n return HttpResponseRedirect('drafts/%s/' % latest_draft.pk)\n \n return super(EasyPublisher, self).change_view(request, object_id, context)", "def response_change_formset(self, request, obj, post_url_continue='../../%s/%s/'):\n opts = obj._meta\n pk_value = obj._get_pk_val()\n verbose_name = opts.verbose_name\n # msg = _('The %(name)s \"%(obj)s\" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}\n msg = _(u'Изменения раздела \"%(title)s\" для %(name)s \"%(obj)s\" успешно сохранены.') % \\\n {\"title\" : force_unicode(self.formset_pages[self.page][\"title\"]), 'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}\n \n if \"_continue\" in request.POST:\n self.message_user(request, msg + ' ' + _(u\"Ниже Вы можете продолжить редактирование.\"))\n return HttpResponseRedirect(post_url_continue % (pk_value, self.page,))\n else:\n self.message_user(request, msg)\n if self.has_change_permission(request, None):\n return HttpResponseRedirect('../../')\n else:\n return HttpResponseRedirect('../../../../')", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_pc__gt=obj.id_xt_pc).order_by('id_xt_pc')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(pcAdmin, self).response_change(request, obj)", "def edit():", "def edit(self, **kwargs):\n ...", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_mcce__gt=obj.id_xt_mcce).order_by('id_xt_mcce')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mcceAdmin, self).response_change(request, obj)", "def force_edit(request, page_id):\n context = RequestContext(request)\n try:\n page = Page.objects.get(pk=page_id)\n except Exception, e:\n raise e\n page.clear_editor()\n page.clear_cache(context)\n return HttpResponseRedirect(request.META['HTTP_REFERER'])", "def post(self) :\n self.redirect('/admin')", "def activate_external_editing(self, new_doc):\n new_doc.setup_external_edit_redirect(self.request, action=\"oneoffixx\")", "def editDetail(id):\n form = EditDetailForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/edit.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editDetailSection\", id=id ,section=section))", "def edit(request, address):\n if address.startswith(\"/\"):\n address = address[1:]\n if address.endswith(\"/\"):\n address = address[:-1]\n\n # we try to find the parent. 
Creating a page without parent isn't possible.\n parent = None\n if \"/\" in address:\n parent = address.rsplit(\"/\", 1)[0]\n else:\n parent = \"\"\n\n try:\n parent = Page.objects.get(address=parent)\n except Page.DoesNotExist:\n parent = None\n\n # try to get the page itself, which might exist\n try:\n page = Page.objects.get(address=address)\n except Page.DoesNotExist:\n page = None\n\n initial = {}\n if page:\n initial[\"title\"] = page.title\n initial[\"content\"] = page.content\n\n if request.method == 'POST':\n # the form has been sent, use the different access rights\n form = PageForm(request.POST, initial=initial)\n if form.is_valid():\n title = form.cleaned_data[\"title\"]\n content = form.cleaned_data[\"content\"]\n user = request.user\n user = user if user.is_authenticated else None\n can = False\n if user and user.is_superuser:\n # the superuser can do it all\n can = True\n elif parent and page is None and parent.access(user, \"write\"):\n # the page doesn't exist, but the parent does, and the user can edit it\n can = True\n elif page and page.access(user, \"write\"):\n # the page already exist and the user can edit it\n can = True\n\n if can:\n new_page = Page.objects.create_or_update_content(address, user, content)\n new_page.title = title\n if parent is not None and page is None:\n new_page.can_write = parent.can_write\n new_page.can_read = parent.can_read\n new_page.save()\n\n return HttpResponseRedirect('/wiki/' + address)\n else:\n form = PageForm(initial=initial)\n\n return render(request, \"wiki/edit.html\", {'form': form, 'address': address, \"page\": page, \"parent\": parent})", "def on_cancel(self, keypress=None):\n self.parentApp.switchFormPrevious()", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_sust__gt=obj.id_xt_sust).order_by('id_xt_sust')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(xt_sustanciasAdmin, self).response_change(request, obj)", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(xt_id_mb__gt=obj.xt_id_mb).order_by('xt_id_mb')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mbAdmin, self).response_change(request, obj)", "def changelist_view(self, request, extra_context=None):\n return HttpResponseRedirect(reverse('admin:index'))", "def changelist_view(self, request, extra_context=None):\n return HttpResponseRedirect(reverse('admin:index'))", "def response_change(self, request, obj):\n if request.POST.has_key(\"_viewnext\"):\n msg = (_('The %(name)s \"%(obj)s\" was changed successfully.') %\n {'name': force_unicode(obj._meta.verbose_name),\n 'obj': force_unicode(obj)})\n next = obj.__class__.objects.filter(id_xt_mc__gt=obj.id_xt_mc).order_by('id_xt_mc')[:1]\n if next:\n self.message_user(request, msg)\n return HttpResponseRedirect(\"../%s/\" % next[0].pk)\n return super(mcAdmin, self).response_change(request, obj)", "def handle_edit_tag_form(tag_id):\n\n return redirect('/')", "def edit(request, pageName):\n \n if request.method == \"POST\":\n form = 
EditForm(request.POST)\n \n if form.is_valid(): \n content = form.cleaned_data[\"content\"]\n title = form.cleaned_data[\"title\"]\n \n util.save_entry(title, content)\n return HttpResponseRedirect(reverse(\"encyclopedia:visit_entry\", args=(title, )))\n \n else:\n\n form = EditForm({'title': pageName, 'content': util.get_entry(pageName) })\n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm(),\n \"pageName\": pageName\n })\n \n \n return render(request, \"encyclopedia/edit_page.html\", {\n \"form\": EditForm({'title': pageName, 'content': util.get_entry(pageName) }),\n \"pageName\": pageName\n })", "def response_post_save_add(self, request, obj):\n opts = self.model._meta\n\n if \"next\" in request.GET:\n return HttpResponseRedirect(request.GET['next'])\n\n if self.has_change_permission(request, None):\n post_url = reverse('admin:%s_%s_changelist' %\n (opts.app_label, opts.module_name),\n args=(quote(self.prescription.pk),),\n current_app=self.admin_site.name)\n else:\n post_url = reverse('admin:index',\n current_app=self.admin_site.name)\n\n return HttpResponseRedirect(post_url)", "def response_add(self, request, obj, post_url_continue='../%s/'):\n opts = obj._meta\n pk_value = obj._get_pk_val()\n\n msg = '\"%s\" was successfully added to the \"%s\" menu.' % (\n force_unicode(obj),\n obj.menu_item.menu\n )\n\n if \"_continue\" in request.POST:\n self.message_user(request, msg + ' ' + \"You may edit it again below.\")\n return HttpResponseRedirect(post_url_continue % pk_value)\n\n elif \"_addanother\" in request.POST:\n self.message_user(request, msg + ' ' + (\"You may add another %s below.\" % force_unicode(opts.verbose_name)))\n return HttpResponseRedirect('%s?menu=%s' % (\n request.path,\n obj.menu_item.menu.pk,\n ))\n\n else:\n self.message_user(request, msg)\n return HttpResponseRedirect(obj.menu_item.menu.get_edit_url())", "def get_success_url(self, request):\n return request.POST.get('next', reverse('mod_admin'))", "def link_edit_callback(self):\n pass", "def goToAdmin(request):\n\n\ttemplate = '../admin'\n\treturn HttpResponseRedirect(template)", "def return_to_source(origin,parent_object_id,target_username):\n\tif origin in ('home','home_reply'):\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"home_loc_pk\",pk=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'history':\n\t\tif target_username:\n\t\t\treturn redirect(\"user_activity\",slug=target_username)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'public':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"public_group\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'search':\n\t\treturn redirect(\"search_username\")\n\telif origin == 'profile':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"user_profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'profile_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"profile\",slug=parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\t\n\telif origin == 'best_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"best_photo_loc_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'photo_comments':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"comment\", parent_object_id)\n\t\telse:\n\t\t\treturn redirect(\"home\")\n\telif origin == 'fresh_photos':\n\t\tif parent_object_id:\n\t\t\treturn redirect(\"see_photo_pk\", parent_object_id)\n\t\telse:\n\t\t\treturn 
redirect(\"photo\")\n\telse:\n\t\treturn redirect(\"home\")", "def editProfile():\n form = EditProfileForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/editprofile.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editProfileSection\", section=section))", "def edit_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jd.Page(self.session, self.source)", "def edit_form():\n return template (\"edit\")", "def test_edit_page_redirects_to_home(testapp, fill_the_db, login_testcase):\n response = testapp.get(\"/journal/1/edit-entry\")\n csrf_token = response.html.find(\"input\", {\"name\": \"csrf_token\"})\n csrf_token = csrf_token.attrs['value']\n\n post_params = {\n 'id': 1,\n 'title': 'Learning Journal Title',\n 'body': 'So many things learned today.',\n 'csrf_token': csrf_token\n }\n response = testapp.post('/journal/1/edit-entry', post_params, status=302)\n full_response = response.follow()\n\n new_title = full_response.html.find_all(class_='entrytitle')[-1].text[1:-1]\n\n assert new_title == post_params['title']", "def get_success_url(self):\n if (any((\n 'review' in self.request.POST,\n 'back_to_leader_step' in self.request.POST,))):\n # Send back to the correct list\n if self.request.user == self.revision.approver:\n url = 'approver_review_document_list'\n elif self.request.user == self.revision.leader:\n url = 'leader_review_document_list'\n else:\n url = 'reviewers_review_document_list'\n\n url = reverse(url)\n\n else:\n url = ''\n\n return url", "def finishEditing(self):\n\t\tself.changed_event()", "def response_post_save_change(self, request, obj):\n url = reverse('admin:prescription_prescription_detail',\n args=[str(obj.id)])\n return HttpResponseRedirect(url)", "def edit(self,item=None):\r\n raise AbstractError\r\n return False", "def companylink_update(request, slug):\n\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n companylink_reference = get_object_or_404(CompanyLink, company=company)\n companylink_form = CompanyLinkForm(instance=companylink_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('companylink_form.html',{'form':companylink_form, 'info': companylink_reference},context_instance=RequestContext(request))\n else:\n companylink_form = CompanyLinkForm(request.POST, instance=companylink_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if companylink_form.is_valid():\n companylink_form.save()\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('companylink_form.html', \n {'form': companylink_form, 'form_errors': companylink_form.errors, 'info': companylink_reference},\n context_instance=RequestContext(request))", "def _parentDirectoryActionTriggeredSlot(self):\r\n\r\n self._controller.model.activeIndex = self._controller.model.activeIndex.parent()", "def home_edituser():\n\tpass", "def edit_record(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member.\n 
return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member,\n project = get_object_or_404(models.Project, slug=slug)\n record = get_object_or_404(models.Record, pk=pk)\n pm = ProjectMember.objects.get(user=request.user, project=project)\n\n # Access control.. if not owner or editor - access denied.\n if pm.is_owner or pm.is_editor:\n # User has access\n if request.method == 'POST':\n # User submits data\n form1 = forms.GeneralRecordForm(request.POST)\n form2 = forms.SpecificRecordForm(request.POST, entry=request.POST['entry_type'])\n context = {\n 'form1':form1,\n 'project':project,\n 'form':form2,\n }\n if form2.is_valid() and form1.is_valid():\n fields = [f.name for f in models.Record._meta.get_fields()]\n data1 = form1.clean()\n data2 = form2.clean()\n # Additional form validation.\n if data1['entry_type'] == 'book':\n if data2['author']== '' and data2['editor'] == '':\n context['err'] = True\n context['errmessage'] = \"Fill in either Author or Editor\"\n return render(request, 'records/record_edit.html', context)\n elif data1['entry_type'] == 'inbook':\n if data2['author'] == '' and data2['editor'] == '':\n context['err'] = True\n context['errmessage'] = \"Fill in either Author or Editor\"\n return render(request, 'records/record_edit.html', context)\n elif data2['chapter'] == '' and data2['pages'] == '':\n context['err'] = True\n context['errmessage'] = \"Fill in either Chapter or Pages\"\n return render(request, 'records/record_edit.html', context)\n # Form is valid .. save into new record\n # making sure no one has edited the record while session is running\n if record.last_edited.__str__() == request.COOKIES.get('last_edited'):\n # No conflict, go on save changes.\n record.entry_type = data1['entry_type']\n record.cite_key = data1['cite_key']\n record.project = project\n for fieldname in fields:\n if fieldname in data2:\n setattr(record, fieldname, data2[fieldname])\n record.last_edited = timezone.now()\n record.save()\n # Send user back to project detail, the overview of all records in the project.\n return redirect('projects:single', slug=slug)\n else:\n # someone changed the record before the user managed to save\n data = forms.ShowRecordForm(data=model_to_dict(record), entry=record.entry_type)\n context = {\n 'old_record':record,\n 'form1':form1,\n 'project':project,\n 'form':form2,\n 'data':data\n }\n # send user to the conflict page.\n return render(request, 'records/record_conflict.html', context)\n\n else:\n # Form is not valid\n context = {\n 'form1':form1,\n 'project':project,\n 'form':form2,\n 'err':True\n }\n return render(request, 'records/record_edit.html', context)\n else:\n # User hasn't submitted any data yet\n # Form filled in with data for selected record.\n form1 = forms.GeneralRecordForm(data=model_to_dict(record))\n form2 = forms.SpecificRecordForm(data=model_to_dict(record),entry=record.entry_type)\n context = {\n 'form1':form1,\n 'form2':form2,\n 'project':project,\n 'record':record\n }\n # Create response in order to set cookie\n response = render(request, 'records/record_edit.html', context)\n # set cookie to enable later check for conlfict\n response.set_cookie('last_edited', record.last_edited.__str__())\n return response\n else:\n # Access denied.\n return HttpResponse(\"You don't have the permission to do this\")", "def edit_form(pagename):\n\n articles = get_articles()\n\n edit_article = None\n for article in articles:\n if article[\"title\"] == pagename:\n 
edit_article = article\n\n if edit_article == None:\n return template(\"skapa-artikel\")\n\n else:\n return template(\"edit\", article=edit_article)", "def handle_edit_post(post_id):\n edited_post = Post.query.get_or_404(post_id)\n\n edited_post.title = request.form['post-title']\n edited_post.content = request.form['post-content']\n\n db.session.add(edited_post)\n db.session.commit()\n\n return redirect(f\"/users/{edited_post.user_id}\")", "def submit_and_back(self):\n self.submit(goback=True)\n if self.first_time:\n if self.keep_sel:\n self.parent().do_select()\n else:\n self.parent().do_start()\n else:\n self.parent().do_detail()", "def entry_page():\n return redirect(url_for('index'))", "def edit_accomment(request, pk):\n\n comment = get_object_or_404(ActorComment, pk=pk)\n form = ActorCommentForm()\n if request.method == \"POST\":\n form = ActorCommentForm(request.POST, instance=comment)\n\n if form.is_valid():\n form.save()\n url = '../../' + str(comment.actor.pk)\n return redirect(url)\n\n context = {\n 'form': form,\n 'comment': comment,\n }\n return render(request, context)", "def edit_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n ssid = decrypt_book_record(request.form['ssid'])\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n edited_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if edited_entry is not None :\n edited_entry.introduction = request.form['introduction']\n if db.session.is_modified(edited_entry) :\n # commit only if something is modified\n try :\n db.session.commit()\n except IntegrityError as e :\n log_error('error when edit:')\n log_error(e.message)\n flash(u'数据库操作失败导致更新失败!请看后台日志')\n flash(u'成功更新条目')\n\n return redirect(url_for('show_entries_admin'))", "def manage_edit_save(self, REQUEST):\n self._config.update(ldap_config.read_form(REQUEST.form, edit=True))\n REQUEST.RESPONSE.redirect(self.absolute_url() + '/manage_edit')", "def post(self):\n cont = self.request_string('continue', default=\"/\")\n self.redirect(users.create_login_url(cont))", "def goToPrevLink():\n if wikiPageStackTrace[-2].getUrl() != \"\":\n oldpage = wikiPageStackTrace[-2]\n print(\"going back to \", oldpage.getUrl())\n titleStackTrace.append(oldpage.getTitle())\n urlStackTrace.append(oldpage.getUrl())\n del wikiPageStackTrace[-1]\n update()\n else:\n update()", "def dispatch(self, request, *args, **kwargs):\n if self.same_user_or_shiftleader(request.user):\n return super(UpdateRun, self).dispatch(request, *args, **kwargs)\n return redirect_to_login(\n request.get_full_path(), login_url=reverse(\"admin:login\")\n )", "def edit_game(request, game_id):\n game = Game.objects.get(id=game_id)\n user = request.user\n if not user.is_staff and user not in game.moderators.all():\n return HttpResponseRedirect('/')\n\n\n if request.method == 'POST':\n # If user changes a gamename we have to do several things:\n # change name in objects, change all paths and make sure that no other \n # game with this name exists\n if 'name' in request.POST and request.POST['name'] != game.name:\n games = Game.objects.filter(name=request.POST['name'])\n if len(games) > 0:\n form = EditGameForm(initial={\n 'name': game.name,\n 'description': game.rules_file.read(),\n 'judge_language': game.judge_lang,\n })\n error_msg = 'There exist other game with that name!'\n return 
render_to_response('gaming/edit_game.html',\n {\n 'form': form,\n 'game': game,\n 'error_msg': error_msg,\n },\n context_instance=RequestContext(request))\n \n # When we made sure that game name is unique we can make necessairy changes\n oldname = game.name\n game.name = request.POST['name']\n game.save()\n\n def move(source, file):\n filename = file.name.split('/').pop()\n file.save(filename, file)\n system('rm -rf ' + source)\n\n move(settings.GAME_RULES_PATH + oldname + '/', game.rules_file)\n move(settings.GAME_JUDGE_SOURCES_PATH + oldname + '/', game.judge_source_file)\n move(settings.GAME_JUDGE_BINARIES_PATH + oldname + '/', game.judge_bin_file)\n\n # When somebody updated decsription it's easier to create new file\n # instead of diff with previous one.\n if 'description' in request.POST:\n # create new file\n path = settings.GAME_RULES_PATH + game.name + '/'\n filename = game.rules_file.name.split('/').pop()\n\n game.rules_file.delete()\n f = open(path + 'tempfile', 'w')\n f.write(request.POST['description'])\n f = open(path + 'tempfile', 'rw')\n\n # save changes\n file_to_save = File(f)\n game.rules_file.save(filename, file_to_save)\n\n # remove temp file\n system('rm ' + path + 'tempfile')\n\n # if user updated just rules file we've got an easy job :P\n if 'game_rules' in request.FILES:\n game.rules_file.delete()\n game.rules_file = request.FILES['game_rules']\n \n # if judge file has changed bigger changes are necessairy\n game.judge_lang = request.POST['judge_language'] \n if 'game_judge' in request.FILES:\n # delete old (no longer needed files\n game.judge_source_file.delete()\n game.judge_bin_file.delete()\n\n #save new instead\n game.judge_source_file = request.FILES['game_judge']\n game.save()\n\n # Recompile source file to directory with source file\n src = settings.MEDIA_ROOT + game.judge_source_file.name\n target = settings.MEDIA_ROOT + game.judge_source_file.name + '.bin' \n lang = game.judge_lang\n compile(src, target, lang)\n\n # Use compiled file in object game\n f = File(open(target))\n game.judge_bin_file.save(game.name, f)\n\n # Save changes made to game object\n game.save()\n\n # Remove compiled file from directory with source\n system('rm ' + target)\n \n return HttpResponseRedirect('/game_details/' + game_id + '/')\n\n game.save()\n return HttpResponseRedirect('/game_details/' + game_id + '/')\n\n else:\n form = EditGameForm(initial={\n 'name': game.name,\n 'description': game.rules_file.read(),\n 'judge_language': game.judge_lang,\n })\n return render_to_response('gaming/edit_game.html',\n {\n 'form': form,\n 'game': game,\n },\n context_instance=RequestContext(request))", "def response_add(self, request, obj, post_url_continue=None):\n # We should allow further modification of the user just added i.e. 
the\n # 'Save' button should behave like the 'Save and continue editing'\n # button except in two scenarios:\n # * The user has pressed the 'Save and add another' button\n # * We are adding a user in a popup\n if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:\n request.POST['_continue'] = 1\n return super(UserAdmin, self).response_add(request, obj,\n post_url_continue)", "def edit_person(self, pk):", "def edit_post(request, year, month, day, slug):\n post = get_model_for_date_and_slug(Post, year, month, day, slug)\n form = PostForm(instance=post)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save()\n if \"continue_editing\" in request.POST:\n return http.HttpResponseRedirect(post.get_edit_url())\n return http.HttpResponseRedirect(post.get_absolute_url())\n return render_to_response(\"montgomery/edit_post.html\", {\"form\": form}, context_instance=RequestContext(request))", "def edit():\n database.ask(mode='single')\n F = database.check(single=True)\n if F and hasattr(F,'edit'):\n name = database[0]\n F.edit(name)", "def edit(request):\n if 'form.submitted' in request.params:\n # delete old post\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('edit_post')\n post = DBSession.query(Post).filter(Post.name==name).first()\n return environment_factory(post=post, save_url=save_url)", "def url_to_edit(obj):\n return reverse(\n 'admin:%s_%s_change' % (obj._meta.app_label, obj._meta.model_name),\n args=[obj.id]\n )", "def __gotoLastEditPosition(self):\n self.activeWindow().gotoLastEditPosition()", "def edit_recipe(description):\n session['description']=description\n if request.method == 'POST':\n des_result=(USERS[session['username']].recipe_category[session['current_recipe_category_title']].\n update_description(session['description'], request.form['description']))\n status_result=(USERS[session['username']].recipe_category[session['current_recipe_category_title']].\n update_status(session['description'], request.form['status']))\n if des_result == 'recipe updated' or status_result == 'recipe updated':\n flash('recipe updated', 'info')\n else:\n flash(des_result, 'warning')\n return redirect(url_for('edit_recipe', recipe_category_title=session['current_recipe_category_title']))\n return render_template('edit_recipe.html', item=USERS[session['username']]\n .recipe_category[session['current_recipe_category_title']].recipes[description],\n recipes=USERS[session['username']].\n recipe_category[session['current_recipe_category_title']].recipes)", "def StopEditing(self):\r\n\r\n self._owner.GetParent().OnRenameCancelled(self._pageIndex)\r\n self.Finish()", "def edit_item(item_id):\n if 'userinfo' not in session.keys():\n session['target'] = url_for('edit_item', item_id=item_id)\n return redirect(url_for('gconnect'))\n if request.method == 'POST':\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n item.name = request.form['name']\n item.category_id = request.form['category']\n item.description = request.form['description']\n 
sqlsession.commit()\n return redirect(url_for('view_item', item_id=item_id))\n sqlsession = SQLSESSION()\n item = sqlsession.query(Item).filter_by(id=item_id).first()\n categories = sqlsession.query(Category).all()\n return render_template(\"edit_item.html\",\n item=item,\n categories=categories)", "def office_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n office_reference = get_object_or_404(Office, id=id,company=company)\n office_form = OfficeForm(instance=office_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('office_form.html',{'form':office_form, 'info': office_reference},context_instance=RequestContext(request))\n else:\n office_form = OfficeForm(request.POST, instance=office_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if office_form.is_valid():\n office_form.save(commit = False)\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'info': office_reference},\n context_instance=RequestContext(request))", "def idx(_request):\n return HttpResponseRedirect('/home')", "def source():\n return redirect(get_last_menus_url())", "def edit(slug):\n entry = get_object_or_404(Entry, Entry.slug == slug)\n if request.method == 'POST':\n if request.form.get('title'):\n entry.title = request.form.get('title')\n if request.form.get('content'):\n entry.content = request.form.get('content')\n entry.published = request.form.get('published') or False\n entry.save()\n\n flash('Entry saved successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n return render_template('edit.html', entry=entry)", "def subproduct_add_case_edit_skip(request):\n session = request.session.get('new_product', {})\n if not session:\n raise Http404()\n\n gtin = session.get('gtin', 0)\n try:\n product = Product.objects.get(gtin=gtin)\n except:\n return redirect(reverse('products:products_list'))\n\n # we remove subproducts, we move to the next step\n session['sub_products'] = []\n return redirect(reverse('products:fulledit_js', args=(product.id,)))", "def on_click(self) -> None:\n self.editing = True", "def cook(self, obj, request, field_name):\n view_url = ''\n edit_url = ''\n \n if hasattr(obj, 'get_absolute_url'):\n view_url = obj.get_absolute_url();\n if request.user.has_perm('%s.change_%s' %(obj._meta.app_label, obj._meta.model_name)):\n\t\t\tedit_url = reverse('admin:%s_%s_change' %(obj._meta.app_label, obj._meta.model_name), args=[obj.id])\n\t\t\n result = {'text': unicode(obj),\n 'view_url': view_url,\n 'edit_url': edit_url\n }\n return result", "def edit_document():", "def edit_page(request, page_id, parent_id=None):\n page = None\n files = None\n\n if page_id:\n page = models.Page.get_by_id(int(page_id))\n if not page:\n return utility.page_not_found(\n request, 'No page exists with id %r.' 
% page_id)\n if not page.user_can_write(request.profile):\n return utility.forbidden(request)\n files = list(\n models.FileStore.all().filter('parent_page =', page).order('name'))\n for item in files:\n item.icon = '/static/images/fileicons/%s.png' % item.name.split('.')[-1]\n\n acl_data = None\n\n if page:\n all_group_keys = [\n g.key() for g in models.UserGroup.all().order('name')]\n groups_without_write_keys = [\n k for k in all_group_keys if k not in page.acl.group_write]\n groups_without_read_keys = [\n k for k in all_group_keys if k not in page.acl.group_read]\n acl_data = {\n 'groups_without_write': models.UserGroup.get(groups_without_write_keys),\n 'groups_without_read': models.UserGroup.get(groups_without_read_keys),\n 'group_write': models.UserGroup.get(page.acl.group_write),\n 'group_read': models.UserGroup.get(page.acl.group_read),\n 'user_write': models.UserProfile.get(page.acl.user_write),\n 'user_read': models.UserProfile.get(page.acl.user_read),\n 'inherits_acl': page.inherits_acl(),\n }\n\n if not request.POST:\n form = forms.PageEditForm(data=None, instance=page)\n return utility.respond(request, 'admin/edit_page',\n {'form': form, 'page': page, 'files': files,\n 'acl_data': acl_data})\n\n form = forms.PageEditForm(data=request.POST, instance=page)\n\n if not form.errors:\n try:\n page = form.save(commit=False)\n except ValueError, err:\n form.errors['__all__'] = unicode(err)\n if form.errors:\n return utility.respond(request, 'admin/edit_page',\n {'form': form, 'page': page, 'files': files})\n\n page.content = request.POST['editorHtml']\n if parent_id and not page.parent_page:\n page.parent_page = models.Page.get_by_id(int(parent_id))\n page.put()\n\n return utility.edit_updated_page(page.key().id(),\n message_id='msgChangesSaved')", "def edit_entry(entry_id):\n\n\tif not session.get('logged_in'):\n\t\tabout(401)\n\n\tquery = 'UPDATE entries SET text=\"%s\" WHERE id=\"%s\"' % (\n\t\trequest.form['text'], str(entry_id))\n\tg.db.execute(query)\n\tg.db.commit()\n\tflash(\"Entry Edited\")\n\n\treturn redirect(url_for('show_entries'))", "def redirect_old_draft(page):\r\n return redirect(url_for('.draft', page=page), 301)", "def is_edit(self):\n return self._tag == 'edit'", "def new_page(request, parent_id):\n if parent_id:\n parent_page = models.Page.get_by_id(int(parent_id))\n else:\n parent_page = models.Page.get_root()\n if parent_page:\n # there is a root, lets force everything to be a child of the root\n # and set the parent_id\n parent_id = parent_page.key().id()\n else:\n # TODO(gpennington): Figure out a more intuitive method for site\n # initialization\n parent_page = utility.set_up_data_store()\n return utility.edit_updated_page(parent_page.key().id())\n\n if not parent_page.user_can_write(request.profile):\n return utility.forbidden(request)\n return edit_page(request, None, parent_id=parent_id)", "def editMenuItemPage(restaurant_id, item_id):\n item = db_methods.searchItemByID(item_id)\n res_id = restaurant_id\n item_id = item_id\n if request.method == 'POST':\n item_name = request.form['item_name']\n item_price = request.form['item_price']\n item_desc = request.form['item_desc']\n item_course = request.form['item_course']\n if item_name and item_price and item_desc and item_course:\n db_methods.editMenuItem(item_name, item_price, item_desc, item_course, item_id)\n time.sleep(0.1)\n return redirect('/restaurants/%s/menu' % res_id)\n else:\n error = \"Please fill out all required fields.\"\n return render_template(\"editmenuitem.html\", error = error)\n else:\n 
return render_template('editmenuitem.html', item=item, res_id=res_id)", "def edit_post_process(post_id):\n\n # extract form data, commit, then redirect to /users\n f_title = request.form[\"post-title\"].strip()\n f_content = request.form[\"post-content\"].strip()\n\n # msg will also include a field for the user_id for routing.\n msg = db_edit_post(post_id, f_title, f_content)\n\n flash(msg[\"text\"], msg[\"severity\"])\n\n return redirect(f\"/users/{msg['user_id']}\")", "def process_post_edit(user_id, post_id):\n\n title = request.form.get('title')\n content = request.form.get('content')\n\n post = Post.query.get_or_404(post_id)\n\n post.title = title\n post.content = content\n\n db.session.add(post)\n db.session.commit()\n\n return redirect(f'/users/{user_id}/posts/{post_id}')", "def adminedit(object, id):\n\n db = get_db()\n\n if request.method == \"POST\":\n execute_string = 'UPDATE ' + object.title() + \" SET \"\n\n if object == 'post':\n execute_string += 'title = \"' + request.form['title'] + '\", content = \"' + request.form['content'] + '\", authorId = ' + request.form[\"authorid\"] + ', categoryId = ' + request.form[\"categoryid\"] + ''\n elif object == 'author':\n execute_string += 'name = \"' + request.form['name'] + '\"'\n elif object == 'category':\n execute_string += 'name = \"' + request.form['name'] + '\", description = \"' + request.form['description'] + '\"'\n\n execute_string += \" WHERE id = \" + str(id)\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n execute_string = \"SELECT * FROM \" + object.title() + \" WHERE id = \" + str(id)\n item = db.execute(execute_string).fetchone()\n\n return render_template(\"new.html\", object=object, item=item)", "def redirect_to_next(self, request):\n\n if 'next' in request.GET:\n next_page = request.GET['next']\n return HttpResponseRedirect(next_page)\n else:\n return redirect('index')", "def user_settings(request):\n return redirect('edit_profile')", "def edit_post(bid, pid):\n # pylint: disable=unused-argument\n pst = Post.query.get(pid)\n form = PostForm(request.form)\n if request.method == 'POST' and current_user.uid == pst.uid:\n if form.validate():\n if pst.name != form.name.data or pst.text != form.desc.data:\n og_name = pst.name\n pst.name = form.name.data\n pst.text = form.desc.data\n DB.session.commit()\n flash('Post ({}) successfully edited!'.format(og_name))\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)", "def test_edit_button_appears(self):\n response = self.client.get(reverse('wagtailnews:index', kwargs={\n 'pk': self.index.pk}))\n self.assertContains(response, self.url)", "def get_redirect_url(self, *args, **kwargs):\n if \"next\" in self.request.POST:\n return self.request.POST.get(\"next\")\n return reverse(\"my_reservations\")", "def post(self):\n if self.data.GET.get('cbox'):\n cbox = True\n else:\n cbox = False\n\n if self.validate():\n self.redirect.program()\n self.redirect.to('edit_gci_program', validated=True, cbox=cbox)\n else:\n self.get()", "def handle_content_edit(content_id):\n\n # instance of ContentForm is available to both GET and POST requests\n form = ContentForm()\n\n # content will be None if it cannot be found\n content = Content.find_by_id(content_id)\n\n # POST - for handling the edit content form\n if form.validate_on_submit():\n\n # validation - owner email must exist\n owner_email = form.owner_email.data\n owner_obj = Owner.find_by_email(owner_email)\n if not owner_obj:\n flash(f'Owner with the email 
{owner_email} does not exist!',\n 'danger')\n # if owner not exist, edit page is reloaded with same content id\n return redirect(url_for('content_edit', content_id=content.id))\n\n # content type choice is extracted from the form\n choice = form.content_type.data # user choice\n choices = dict(ContentForm.SELECT_CHOICES) # all possible choices\n\n # content is updated with form values and saved to the database\n content.content_name = form.content_name.data.title()\n content.content_type = choices.get(choice)\n content.valid_months = form.valid_months.data\n content.updated_at = date.today() # today's date becomes last updated\n content.owner_id = owner_obj.id\n\n # saving content errors handled\n try:\n content.save_content()\n except HTTPException:\n return \"Server cannot update the content at this time\", 500\n\n # user is redirected to the main content page with success msg\n flash(f'{content.content_name} has been updated!', 'success')\n return redirect(url_for('content'))\n\n # GET - display the form\n # form is pre-populated with existing content data\n form.content_name.data = content.content_name\n form.owner_email.data = Owner.find_by_id(content.owner_id).owner_email\n form.valid_months.data = content.valid_months\n form.submit.data = \"Update Content\"\n\n # content type stored in this content is looked up against all types\n # each choice is a tuple pair - (stored choice, displayed choice)\n for form_type in ContentForm.SELECT_CHOICES:\n # choice becomes default value on form if it matches the stored value\n if form_type[1] == content.content_type:\n form.content_type.data = form_type[0]\n\n return render_template('content_edit.html',\n content_name=content.content_name,\n form=form)", "def test_save_and_continue_editing_redirects_to_update(self):\n with open(fixture_file, 'rb') as fp:\n params = {\n \"caption\": \"some file\",\n \"publication\": fp,\n \"_continue\": \"\"\n }\n response = self.client.post(reverse(\"admin2:files_captionedfile_create\"),\n params)\n captioned_file = CaptionedFile.objects.get(caption=\"some file\")\n self.assertRedirects(response, reverse(\"admin2:files_captionedfile_update\",\n args=(captioned_file.pk, )))", "def hgHisteditContinue(self, name):\n # find the root of the repo\n repodir = self.vcs.splitPath(name)[0]\n while not os.path.isdir(os.path.join(repodir, self.vcs.adminDir)):\n repodir = os.path.dirname(repodir)\n if os.path.splitdrive(repodir)[1] == os.sep:\n return False\n \n args = self.vcs.initCommand(\"histedit\")\n args.append(\"--continue\")\n args.append(\"-v\")\n \n editor = os.path.join(\n os.path.dirname(__file__), \"HgHisteditEditor.py\")\n env = {\"HGEDITOR\": \"{0} {1}\".format(sys.executable, editor)}\n \n dia = HgDialog(\n self.tr(\"Continue histedit session\"),\n self.vcs,\n useClient=False)\n res = dia.startProcess(args, repodir, environment=env)\n if res:\n dia.exec_()\n res = dia.hasAddOrDelete()\n self.vcs.checkVCSStatus()\n return res" ]
[ "0.6385778", "0.6385778", "0.6379418", "0.62234807", "0.6166165", "0.61428994", "0.6119325", "0.6115395", "0.61025864", "0.60791975", "0.6070473", "0.6042546", "0.60110354", "0.60064095", "0.600623", "0.5988462", "0.59622556", "0.59316474", "0.590217", "0.58978534", "0.5896916", "0.5869238", "0.58592373", "0.5857044", "0.58454955", "0.5832334", "0.58213085", "0.5808154", "0.5802074", "0.57872236", "0.5782465", "0.5782465", "0.5756899", "0.57509625", "0.5750548", "0.5722687", "0.57171327", "0.56690055", "0.5644107", "0.559165", "0.5581645", "0.5579687", "0.55705756", "0.5556698", "0.5539305", "0.5525691", "0.55223024", "0.54909664", "0.54846406", "0.54668653", "0.54561293", "0.5444459", "0.5434827", "0.5425264", "0.5422339", "0.53944796", "0.5389491", "0.538796", "0.536765", "0.53489", "0.5308764", "0.5299988", "0.52970695", "0.52932006", "0.529181", "0.5288616", "0.52849007", "0.52833384", "0.5279252", "0.5256369", "0.52532524", "0.5251459", "0.5250964", "0.5244498", "0.52441233", "0.5242975", "0.5242559", "0.5241522", "0.52415025", "0.5224424", "0.52197784", "0.5219061", "0.5206953", "0.5206208", "0.52044713", "0.520379", "0.52035", "0.52025115", "0.5191939", "0.5190559", "0.5185056", "0.5184606", "0.51759386", "0.51744664", "0.5170952", "0.5160977", "0.51602405", "0.51471466", "0.5141524", "0.5139184" ]
0.65077806
0
Generate immediate (different by one mismatch) neighbours of the given genome pattern
def _generate_immediate_neighbours(pattern: str) -> list:
    generated = []
    for i in range(len(pattern)):
        if pattern[i] == 'A':
            generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_A])
        elif pattern[i] == 'C':
            generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_C])
        elif pattern[i] == 'T':
            generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_T])
        elif pattern[i] == 'G':
            generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_G])
    return generated
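A minimal usage sketch, not part of the original record: the function above refers to LIST_A, LIST_C, LIST_T and LIST_G, which are defined elsewhere in its source module. The definitions below are an assumption about their contents (for each base, the three bases it can be substituted with), shown only to make the snippet self-contained.

    # Assumed substitution tables (hypothetical; not shown in the original source).
    LIST_A = ['C', 'G', 'T']
    LIST_C = ['A', 'G', 'T']
    LIST_T = ['A', 'C', 'G']
    LIST_G = ['A', 'C', 'T']

    # Example: a 3-mer has 3 positions x 3 substitutions = 9 one-mismatch neighbours.
    print(_generate_immediate_neighbours("ACG"))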
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_neighbours(pattern: str, mismatches: int) -> set:\n neighbourhood = set()\n neighbourhood.add(pattern)\n\n curr_patterns = [pattern]\n next_patterns = []\n\n for curr_mismatches in range(mismatches):\n for curr_pattern in curr_patterns:\n for neighbour in _generate_immediate_neighbours(curr_pattern):\n if neighbour not in neighbourhood:\n neighbourhood.add(neighbour)\n next_patterns.append(neighbour)\n\n curr_patterns = next_patterns\n next_patterns = []\n\n return neighbourhood", "def neighbors(pattern, d):\n\n if d == 0:\n return [pattern]\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n neighborhood = []\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n for text in suffix_neighbors:\n hdist = compute_hamming_distance(suffix_pattern, text)\n if hdist < d:\n for n in ['A', 'C', 'G', 'T']:\n neighbor = n + text\n neighborhood.append(neighbor)\n else:\n neighbor = pattern[0] + text\n neighborhood.append(neighbor)\n return neighborhood", "def getNeighbours(seg,meta,inversedIndex):\n return np.unique(np.fromiter( (inversedIndex[x] for x in np.concatenate([meta.loc[seg]['ins'],meta.loc[seg]['outs']])),dtype=np.int))", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]", "def neighbors_generator(state: str, nurses_number=10) -> str:\n\n genes = 21 * nurses_number\n\n # Random index to change and generated the neighbor\n index = randrange(0, genes)\n\n # Here we're taking the first part of the state before the bit that will be modified\n new_state = state[0:index]\n\n # Here is modified the bit\n if state[index] == '0':\n new_state += '1'\n else:\n new_state += '0'\n\n # Here we're taking the last part of the state passed\n new_state += state[index+1:]\n\n # Here is returned the new state and the next bit to be modified\n return new_state", "def _get_neighbours(kmer):\n assert (is_dna(kmer))\n bases = 'ACTG'\n result = set()\n for i in range(len(kmer)):\n for base in bases:\n result.add(kmer[:i] + base + kmer[(i + 1):])\n return result", "def find_neighbours(engine, field, features):\n code = CodeSegment(engine)\n N = len(engine.q)\n Nf = 3 ** engine.pm.ndim\n code.assign(x=Literal(numpy.zeros((N, Nf))), y='features')\n grid = engine.pm.generate_uniform_particle_grid(shift=0)\n for i in range(Nf):\n ii = i\n a = []\n for d in range(engine.pm.ndim):\n a.append(ii % 3 - 1)\n ii //= 3\n\n grid1 = grid + numpy.array(a[::-1]) * (engine.pm.BoxSize / engine.pm.Nmesh)\n layout = engine.pm.decompose(grid1)\n code.readout(x=Literal(grid1), mesh='field', value='feature1', layout=Literal(layout), resampler='nearest')\n code.assign_component(attribute='features', value='feature1', dim=i)\n return code", "def find_pattern(pattern, genome):\n\n tens_table = [pow(10, m) for m in xrange(len(pattern))]\n hash_pattern = get_hash(pattern, tens_table)\n index = []\n for current_index in xrange(len(genome) - len(pattern) + 1):\n\t\tif current_index == 0:\n\t\t\tcurrent_hash = get_hash(genome[0:len(pattern)], tens_table)\n\t\telse:\n\t\t\tcurrent_hash = ((current_hash - (nucleotide_value_map[genome[current_index-1]] * tens_table[len(pattern)-1])) * 10 + 
nucleotide_value_map[genome[current_index-1+len(pattern)]])\n if current_hash == hash_pattern:\n index.append(current_index)\n return index", "def neighbors(pattern, d):\n if d == 0:\n return pattern\n\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n\n neighborhood = []\n\n # ##########\n # We can use recursion to successively compute neighbors(suffix(pattern), d),\n # where suffix(pattern) = pattern[1:]\n #\n # The reason being: if we have neighbors(suffix(pattern, d)), then we know\n # that the Hamming Distance between `pattern` and `suffix(pattern)` is either equal\n # to d or less than d.\n #\n # In the first case, we can add `pattern[0]` to the beginning of\n # `suffix(pattern)` in order to obtain a k-mer belonging to\n # Neighbors(Pattern, d). In the second case, we can add any symbol\n # to the beginning of `suffix(pattern)` and obtain a k-mer belonging\n # to Neighbors(Pattern, d).\n # ##########\n\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n\n for i in range(len(suffix_neighbors)):\n\n neighboring_pattern_text = suffix_neighbors[i]\n\n if hamming_distance(suffix_pattern, neighboring_pattern_text) < d:\n for n in _NUCLEOTIDES:\n neighborhood.append(n + neighboring_pattern_text)\n\n else:\n neighborhood.append(pattern[0] + neighboring_pattern_text)\n\n return neighborhood", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def _get_neighbours(self, pos, input_data):\r\n neighbours = []\r\n\r\n start = AlignmentOutputData.table_values[pos.y][pos.x]\r\n diagonal = float(strings.NAN)\r\n up = float(strings.NAN)\r\n left = float(strings.NAN)\r\n\r\n cur_char_seq_1 = strings.EMPTY\r\n cur_char_seq_2 = strings.EMPTY\r\n\r\n if pos.y - 1 >= 0 and pos.x - 1 >= 0:\r\n diagonal = AlignmentOutputData.table_values[pos.y - 1][pos.x - 1]\r\n\r\n if pos.y - 1 >= 0:\r\n up = AlignmentOutputData.table_values[pos.y - 1][pos.x]\r\n\r\n if pos.x - 1 >= 0:\r\n left = AlignmentOutputData.table_values[pos.y][pos.x - 1]\r\n\r\n if pos.y - 1 >= 0:\r\n cur_char_seq_1 = input_data.sequence_a[pos.y - 1]\r\n if pos.x - 1 >= 0:\r\n cur_char_seq_2 = input_data.sequence_b[pos.x - 1]\r\n\r\n matching = start == diagonal + input_data.cost_function.get_value(cur_char_seq_1, cur_char_seq_2)\r\n deletion = start == up + input_data.gap_cost\r\n insertion = start == left + input_data.gap_cost\r\n\r\n if matching:\r\n neighbours.append(Vector(pos.x - 1, pos.y - 1))\r\n\r\n if insertion:\r\n neighbours.append(Vector(pos.x - 1, pos.y))\r\n\r\n if deletion:\r\n neighbours.append(Vector(pos.x, pos.y - 1))\r\n\r\n return neighbours", "def neighbours(num):\n num = str(num)\n num = '0'*(4-len(num))+num # Prepend 0 until length is 4\n\n return [\n int(add_wo_carry(num, '0001')),\n int(add_wo_carry(num, '0010')),\n int(add_wo_carry(num, '0100')),\n int(add_wo_carry(num, '1000')),\n int(sub_wo_carry(num, '0001')),\n int(sub_wo_carry(num, '0010')),\n int(sub_wo_carry(num, '0100')),\n int(sub_wo_carry(num, '1000'))]", "def get_neighbour(self, y, x):\n if [y, x] in self.mine_locations:\n return Minesweeper.BOMB\n count = 0\n # (x-1, y-1), (x, y-1), (x+1, y-1),\n # (x-1, y), (x, y), (x+1, y),\n # (x-1, y+1), (x, y+1), (x+1, y+1)\n for xe in range(x - 1, x + 2):\n for ye in range(y - 1, y + 2):\n if [ye, 
xe] in self.mine_locations:\n count += 1\n return str(count)", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n", "def neighbours(indexing, random_stream=None):\n\n # pre-compute some necessary values\n counts = compute_index_counts(indexing)\n binary_sm = compute_binary_set_mappings(indexing, counts)\n unary_sm = compute_unary_set_mappings(indexing, counts)\n empty = find_empty(counts)\n image = [idx for idx,count in enumerate(counts) if count != 0]\n \n def candidates(vertex, index, image, binary_sm, unary_sm, counts, empty):\n \"\"\"generates the set of possible target indices for a given vertex\n\n :param vertex: the vertex\n :type vertex: int\n :param index: the current index of the vertex\n :type index: int\n :param image: the image of the current indexing\n :type image: list\n :param binary_sm: result of `compute_binary_set_mappings`\n :type binary_sm: np.array[n,dtype=int]\n :param unary_sm: result of `compute_unary_set_mappings`\n :type unary_sm: np.array[n,dtype=int]\n :param counts: number of vertices/index\n :type counts: np.array[n,dtype=int]\n :param empty: an index that is assigned no vertex, None is also allowed\n :type empty: int/None\n :yield: iterator over target indices\n :rtype: Iterator[int]\n \"\"\"\n for k in image:\n if k == index:\n continue\n if counts[index] > 1 or counts[k] > 1:\n yield k\n elif vertex < unary_sm[k]: # implicitly: counts[index]==1 and counts[k]==1\n yield k\n if counts[index] > 2 or (counts[index] == 2 and vertex==binary_sm[index]):\n yield empty\n \n if random_stream is not None:\n # Random Move-Enumeration\n pweights = compute_probability_weights(indexing, counts, image, binary_sm)\n vertices = np.random.choice(indexing.shape[0], random_stream, p=pweights)\n for vertex in vertices:\n index = indexing[vertex]\n ks = list(candidates(vertex, index, image, binary_sm, unary_sm, counts, empty))\n k = random.choice(ks)\n yield vertex, k\n else:\n # Move-Enumeration\n for vertex, index in enumerate(indexing):\n for k in candidates(vertex, index, image, binary_sm, unary_sm, counts, empty):\n yield vertex, k", "def neighbour(seq):\n it = iter(seq)\n it_next = itertools.islice(itertools.chain(iter(seq), [None]), 1, None)\n\n prev = None\n for curr, next in zip(it, it_next):\n yield(prev, curr, next)\n prev = curr", "def _neuron_location(self, m, n):\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def neighbors(pattern, d):\n tides = set([\"A\", \"C\", \"G\", \"T\"])\n if d == 0:\n return set([pattern])\n if len(pattern) == 1:\n return tides\n neighborhood = set([])\n suffix_neighbors = neighbors(pattern[1:], d)\n for text in suffix_neighbors:\n if ham_dist(pattern[1:], text) < d:\n for tide in tides:\n neighborhood.add(tide + text)\n else:\n neighborhood.add(pattern[0] + text)\n return neighborhood", "def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n 
check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()", "def compute_neighbours(index, matrix):\n row, col = decode_to_matrix_cell(index, matrix)\n n1 = index + 1\n if n1 >= matrix.size or col == matrix.cols - 1:\n n1 = None\n\n n2 = index + matrix.cols\n if n2 >= matrix.size or row == matrix.rows - 1:\n n2 = None\n return n1, n2,", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def _get_neighbors(cls, pattern: str, max_distance: int) -> List[str]:\n return get_neighborhood(pattern, ''.join(cls.nucleobases.keys()), max_distance)", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def get_neighbors(pattern, d):\n # if no difference\n if d == 0:\n return [pattern]\n # if no pattern\n if len(pattern) == 1:\n return ['A', 'C', 'T', 'G']\n # initialize the container\n neighborhood = set()\n # checking for the suffix patterns\n neighbors = get_neighbors(pattern[1:], d)\n # iterates through the neighbors\n for kmer in neighbors:\n # check for the allowed distance\n if hamming_distance(pattern[1:], kmer) < d:\n # iterates through the charcater/bases\n for char in ['A', 'C', 'T', 'G']:\n # add the character to the suffix payyern\n neighborhood.add(char + kmer)\n else:\n # otherwise add the first character again\n neighborhood.add(pattern[0] + kmer)\n return sorted(list(neighborhood))", "def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1", "def neighbours(assignment): \n for index_1, index_2 in itertools.combinations(range(len(assignment)), 2):\n new_assign = list(assignment)\n new_assign[index_1], new_assign[index_2] = new_assign[index_2], new_assign[index_1]\n yield tuple(new_assign)", "def test_multigrid_calculates_neighbours_correctly():\n\n # create a grid which will result in 9 cells\n h = 64\n img_dim = (3 * h + 1, 3 * h + 1)\n amg = mg.MultiGrid(img_dim, h, WS=127)\n\n # check that each cell has the expected neighbours\n print(amg.n_cells)\n\n # expected neieghbours left to right, bottom to top\n cells = [{\"north\": amg.cells[3], \"east\": amg.cells[1], \"south\": None, \"west\": None}, # bl\n {\"north\": amg.cells[4], \"east\": amg.cells[2],\n \"south\": None, \"west\": amg.cells[0]}, # bm\n {\"north\": amg.cells[5], \"east\": None,\n \"south\": None, \"west\": amg.cells[1]}, # br\n {\"north\": amg.cells[6], \"east\": amg.cells[4],\n \"south\": amg.cells[0], \"west\": None}, # ml\n {\"north\": amg.cells[7], \"east\": amg.cells[5],\n \"south\": amg.cells[1], \"west\": amg.cells[3]}, # mm\n {\"north\": amg.cells[8], \"east\": None,\n \"south\": amg.cells[2], \"west\": amg.cells[4]}, # mr\n # tl\n {\"north\": None, \"east\": amg.cells[7],\n \"south\": amg.cells[3], \"west\": None},\n # tm\n {\"north\": None,\n \"east\": amg.cells[8], \"south\": amg.cells[4], \"west\": amg.cells[6]},\n {\"north\": None, \"east\": None,\n \"south\": amg.cells[5], \"west\": amg.cells[7]}, # tr\n ]\n\n for ii, (gc, cell) in enumerate(zip(amg.cells, cells)):\n print(ii)\n assert gc.north == cell['north']\n assert 
gc.east == cell['east']\n assert gc.south == cell['south']\n assert gc.west == cell['west']", "def neighbours(ar, cur_index, cnt_of_neiboors=3, exclude_from_neibors_index=[]):\n rmax = np.max([0, cur_index + cnt_of_neiboors - len(ar)])\n lmin = np.max([cur_index - (cnt_of_neiboors + rmax), 0])\n\n excl = set(exclude_from_neibors_index) | {cur_index}\n nbs = [i for i in range(lmin, len(ar)) if i not in excl]\n return ar[nbs[:cnt_of_neiboors * 2]]", "def neighbours(number: int, number_sectors: int) -> [int, int, int, int]:\n col = number % number_sectors\n row = number // number_sectors\n\n nieg = [number - number_sectors, number + number_sectors, number - 1, number + 1]\n\n if row == 0:\n nieg[0] = -1\n if row == number_sectors - 1:\n nieg[1] = -1\n if col == 0:\n nieg[2] = -1\n if col == number_sectors - 1:\n nieg[3] = -1\n return nieg", "def get_neighbours(self):\n n = [deepcopy(self.representation) for i in range(len(self.representation) - 1)]\n\n for count, i in enumerate(n):\n i[count], i[count + 1] = i[count + 1], i[count]\n\n n = [Individual(i) for i in n]\n return n", "def neighbours(input_configuration, position):\n\n row_pos, seat_pos = position\n return [(check_row, check_seat)\n for check_row in range (row_pos-1, row_pos + 2) for check_seat in range (seat_pos-1, seat_pos+2)\n if (check_row != row_pos or check_seat != seat_pos)\n and (check_row, check_seat) in input_configuration.keys()]", "def neighbors((min_i, min_j), (max_i, max_j), (i, j)):\n if j + 1 <= max_j:\n yield (i, j + 1)\n if j - 1 >= min_j:\n yield (i, j - 1)\n if i + 1 <= max_i:\n yield (i + 1, j)\n if i - 1 >= min_i:\n yield (i - 1, j)", "def get_2_step_neighbours(node):\n for i in range(len(node)):\n yield node[0:i] + (flip(node[i]),) + node[i+1:]\n\n for i, j in itertools.permutations(range(len(node)), 2):\n if i < j:\n yield node[0:i] + (flip(node[i]),) + node[i+1:j] + (flip(node[j]),) + node[j+1:]", "def neighbours((u,v)):\r\n return ((u,v+1), (u+1,v), (u,v-1), (u-1,v))", "def Neighbourgs(abcd, h):\n\n Nelem = len(abcd)\n\n a = abcd[h][0]\n b = abcd[h][1]\n c = abcd[h][2]\n d = abcd[h][3]\n\n el1, el2, el3, el4 = 0, 0, 0, 0\n\n N = 0\n\n for j in range(0, Nelem - 1):\n\n if N == 4:\n break\n\n if a in abcd[j, :] and b in abcd[j, :] and j != h:\n N += 1\n el1 = j + 1\n\n if b in abcd[j, :] and c in abcd[j, :] and j != h:\n N += 1\n el2 = j + 1\n\n if c in abcd[j, :] and d in abcd[j, :] and j != h:\n N += 1\n el3 = j + 1\n\n if d in abcd[j, :] and a in abcd[j, :] and j != h:\n N += 1\n el4 = j + 1\n\n return [el1, el2, el3, el4]", "def get_neighbours(self):\r\n n = [deepcopy(self.representation) for i in range(len(self.representation) - 1)]\r\n\r\n for count, i in enumerate(n):\r\n i[count], i[count + 1] = i[count + 1], i[count]\r\n\r\n n = [Individual(i) for i in n]\r\n return n", "def neighbors(self, cell):\n x = cell.x\n y = cell.y\n for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:\n neighbor = self[new_x, new_y]\n if neighbor is not None:\n yield neighbor", "def findImmediateNeighbours(self):\n immediateNeighbours = []\n\n if self.xCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate - 1, self.yCoordinate))\n\n if self.xCoordinate + 1 < 395:\n immediateNeighbours.append(PixelPosition(self.xCoordinate + 1, self.yCoordinate))\n\n if self.yCoordinate + 1 < 500:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate + 1))\n\n if self.yCoordinate - 1 > 0:\n immediateNeighbours.append(PixelPosition(self.xCoordinate, self.yCoordinate - 1))\n\n 
return immediateNeighbours", "def adjacent_tiles(self,tile,pattern):\n\n # Initialize the list of tiles to return\n adj_tiles = []\n\n # Find the row and column of the input tile\n for i in self.tilelist:\n for j in i:\n if j == tile:\n row = self.tilelist.index(i)\n column = self.tilelist[row].index(j)\n\n # Define functions for the 2 distinct patterns\n def plus_sign(self,row,column):\n nonlocal adj_tiles\n if row - 1 >= 0:\n adj_tiles += [self.tilelist[row - 1][column]]\n if row + 1 != len(self.tilelist):\n adj_tiles += [self.tilelist[row + 1][column]]\n if column - 1 >= 0 :\n adj_tiles += [self.tilelist[row][column - 1]]\n if column + 1 != len(self.tilelist[row]):\n adj_tiles += [self.tilelist[row][column + 1]]\n\n def diagonal(self,row,column):\n nonlocal adj_tiles\n if column - 1 >= 0:\n if row - 1 >= 0:\n adj_tiles += [self.tilelist[row - 1][column - 1]]\n if row + 1 != len(self.tilelist):\n adj_tiles += [self.tilelist[row + 1][column - 1]]\n if column + 1 != len(self.tilelist[row]):\n if row - 1 >= 0:\n adj_tiles += [self.tilelist[row - 1][column + 1]]\n if row + 1 != len(self.tilelist):\n adj_tiles += [self.tilelist[row + 1][column + 1]]\n\n # Return the tiles that form a plus sign with the given input tile\n if pattern == 'p':\n plus_sign(self,row,column)\n\n # Return the tiles touching the four corners of the input tile\n elif pattern == 'x':\n diagonal(self,row,column)\n\n # Return all of the tiles surrounding the input tile\n elif pattern == 'b':\n plus_sign(self,row,column)\n diagonal(self,row,column)\n\n return adj_tiles", "def test_split_adds_known_neighbours(mock_amg):\n\n mock_amg.cells[4].split()\n # bl\n assert mock_amg.cells[-4].north is mock_amg.cells[-2]\n assert mock_amg.cells[-4].east is mock_amg.cells[-3]\n\n # br\n assert mock_amg.cells[-3].north is mock_amg.cells[-1]\n assert mock_amg.cells[-3].west is mock_amg.cells[-4]\n\n # tl\n assert mock_amg.cells[-2].south is mock_amg.cells[-4]\n assert mock_amg.cells[-2].east is mock_amg.cells[-1]\n\n # tr\n assert mock_amg.cells[-1].south is mock_amg.cells[-3]\n assert mock_amg.cells[-1].west is mock_amg.cells[-2]", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def neighborhood((y, x), (height, width)):\n return [(yt, xt) for xt in [x + 1, x, x - 1]\n for yt in [y + 1, y, y - 1]\n if 0 <= xt < width and 0 <= yt < height\n and (xt, yt) != (x, y)]", "def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)", "def make_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _hexose = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, 
map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_neuac = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if i == 0:\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, base_neuac) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 1, base_neuac + i\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods", "def motif_enumeration(dna, k, d):\n\n patterns = []\n neighborhoods = []\n for dna_string in dna:\n len_dna_string = len(dna_string)\n neighborhood = []\n for i in range(len_dna_string - k + 1):\n pattern = dna_string[i:i + k]\n dna_neighbors = neighbors(pattern, d)\n neighborhood = neighborhood + dna_neighbors\n neighborhoods.append(neighborhood)\n for n in neighborhoods[0]:\n count = 0\n for i in range(1, len(neighborhoods)):\n if n in neighborhoods[i]:\n count = count + 1\n if count == len(neighborhoods) - 1 and n not in patterns:\n patterns.append(n)\n return patterns", "def check_ext(im, i, j):\n neighb = 0\n count = 0\n for a in range(8):\n if (im[i+relpos[a][0], j+relpos[a][1]] and (count == 0)):\n count += 1\n neighb += 1\n else:\n count = 0\n return (neighb < 2)", "def get_neighbours(x, y, board):\n return [get_left(x, y, board), get_upper(x, y, board), get_right(x, y, board), get_lower(x, y, board)]", "def compute_pattern(n):\n for x in range(1,n):\n for y in range(x, x*2):\n print(y, end= \" \")\n print()", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def test_make_neighbors(position):\n\n def result_row(i, size):\n return [i] + [i + 1] * (size - 2) + [i]\n\n size = position.size\n neigh_counts = [0] * (size ** 2)\n first_row = result_row(2, size)\n last_row = result_row(2, size)\n middle_row = result_row(3, size)\n desired_result = first_row + (middle_row) * (size - 2) + last_row\n\n for c, neighs in go.make_neighbors(size=size):\n for pt in list(neighs):\n neigh_counts[pt] += 1\n\n assert desired_result == neigh_counts", "def original(arr):\n height = np.shape(arr)[0]\n width = np.shape(arr)[1]\n result = np.array(arr)\n\n for row in range(height):\n for col in range(width):\n neighbors = 0\n val = result[row][col]\n for i in range(-1, 2):\n for j in range(-1, 2):\n if i == 0 and j == 0: # The cell itself cannot be counted as a neighbor\n continue\n if row + i < 0 or col + j < 0 or row + i > 
height or col + j > width: # Out of bounds\n continue\n with suppress(IndexError):\n if arr[row + i][col + j] == 1:\n neighbors += 1\n\n if neighbors == 3 and val == 0: # Cell becomes alive\n result[row][col] = 1\n\n elif neighbors > 3 and val == 1 or neighbors < 2 and val == 1: # Cell dies\n result[row][col] = 0\n\n return result", "def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list", "def forward(grid:ArcGrid) -> ArcGrid:\n grid = grid.copy()\n x, y = grid.shape\n\n for i in range(x):\n for j in range(y):\n if grid[i,j] != ArcColors.BLACK:\n count_nb_neighbours = 0\n for n in adjacent(grid, (i,j), diag=False):\n if grid[n] != ArcColors.BLACK:\n count_nb_neighbours+= 1\n\n if count_nb_neighbours < 2:\n grid[i,j] = ArcColors.BLACK\n\n return grid", "def neighbours2((u,v)):\r\n\r\n return ((u-1, v+1), (u,v+1), (u+1,v+1), \r\n (u-1,v), (u+1,v),\r\n (u-1,v-1), (u,v-1), (u+1,v-1))", "def get_neighbours_and_directions(self, from_position):\n \n # Transform index into board matrix into index into index into neighbour matrix\n from_row_index = self.board_to_connection_index(from_position)\n row = self.connection_matrix[from_row_index]\n \n neighbours = []\n for col_num in range(0, len(row)): \n if row[col_num]:\n # Transform index into board index\n board_index = self.connection_to_board_index(col_num)\n if self.board[board_index[0]][board_index[1]].state != PegState.EMPTY:\n neighbours.append((board_index, row[col_num])) # Store board index and direction in neighbours\n return neighbours", "def check_neighbours(self, grid):\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total", "def test_get_neighbours(self):\n self.assertEqual(self.game.get_neighbours(2,2), [[1, 1], [1, 2], [1, 3], \n [2, 1], [2, 3], [3, 1], [3, 2], [3, 3]])\n self.assertEqual(self.game.get_neighbours(0,0), [[0, 1], [1, 0], [1, 1]])\n self.assertEqual(self.game.get_neighbours(44,0), [[43, 0], [43, 1], [44, 1]])\n self.assertEqual(self.game.get_neighbours(45,0), [])\n self.assertEqual(self.game.get_neighbours(44,89), [[43, 88], [43, 89], [44, 88]])", "def 
floodfill(i, j, row, col, island):\n count = 0\n if island[i][j] == 1:\n island[i][j] = 2\n eightdirections = [(1, 0), (-1, 0), (0, 1), (0, -1),\n (1, 1), (1, -1), (-1, 1), (-1, -1)]\n newpositions = [(i+x, j+y) for x, y in eightdirections]\n for posx, posy in newpositions:\n if posx in range(0, row) and posy in range(0, col):\n floodfill(posx, posy, row, col, island)\n count = 1\n return count", "def expand2(self, network, radius):\n expansion = []\n neighbors = frozenset().union(*[set(network.getrow(g).indices) for g in self.genes]) \n for neighb in neighbors: \n if neighb in self.genes:\n continue\n dist_from_pattern = self.dist[network.getrow(neighb).indices] \n dist_of_added = dist_from_pattern[dist_from_pattern > -1].min() + 1\n if dist_of_added > radius:\n continue\n \n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n #next_pattern.edges.add((pred, neighb))\n next_pattern.added = neighb \n next_pattern.dist[neighb] = dist_of_added\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]", "def _get_neighbours(point):\n # Pull coords out of point.\n x = point[0]\n y = point[1]\n z = point[2]\n return ((x-1, y, z), (x+1, y, z), (x, y-1, z), (x, y+1, z), (x, y, z-1), (x, y, z+1))", "def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;", "def findIsland(i, j, matrix):\n visited = [[ False for ele in row] for row in matrix]\n totalEdges = traverseNodes(i, j, matrix, visited)\n\n return totalEdges", "def open_neighbours(self, y, x):\n if [y, x] in self.mine_locations:\n return [y, x]\n # generate neighbours with positive indexes\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n # if the indexes are out of the game table, skip\n if xe >= self.x or ye >= self.y:\n continue\n # if the current coordinates are still untouched, update their values\n if self.table_state[ye][xe] == '-':\n self.table_state[ye][xe] = self.final_table[ye][xe]\n # if the coordinate has a value of 0, recursively open it's neighbours.\n if self.final_table[ye][xe] == '0':\n self.open_neighbours(ye, xe)", "def generate_all_locations(grid, shape):", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def set_pattern(self, pattern):\n for ir, row in enumerate(pattern):\n for ic, col in enumerate(row):\n relay_n = ir*len(row) + ic\n self.relay.set(relay_n, bool(col))", "def neighborhood(index, npoints, maxdist=1):\n return [index + i for i in range(-maxdist, maxdist + 1)\n if i != 0 and 0 <= index + i <= npoints - 1]", "def neighbour_cells(id, Nx):\n r = cell_coord(id, Nx)\n neighs = []\n tmp = np.arange(3) - 1\n for p in itertools.product(tmp, tmp, tmp):\n neigh = (r + p) % Nx\n neighs.append(neigh)\n return [id_from_coord(neigh, Nx) for neigh in neighs]", "def get_neighbours(point, grid):\n # possible movements (diagonally is impossible)\n dy, dx = [-1, 0, 1, 0], [0, 1, 0, -1]\n\n neighbours = []\n for i in 
range(4):\n y, x = point[0] + dy[i], point[1] + dx[i]\n\n # skip if not within maze's bounds (NOT actually needed since there is a \"#\" barrier around the maze)\n # if not (0 <= x < len(grid) and 0 <= y < len(grid[0])):\n # continue\n\n point_type = grid[y][x]\n if point_type == \"#\": # skip if wall\n continue\n neighbours.append((y, x))\n\n return neighbours", "def gray_points(self, sag_sequence, cor_sequence):\n\n # python register.py -side=1 -imgnumber=9 # right lung (user view)\n\n def diff_from_zero(value):\n return value != 0\n\n column = self.cor_sequences.index(cor_sequence)\n row = self.sag_sequences.index(sag_sequence)\n\n neighbor_up = row - 1\n neighbor_down = row + 1\n neighbor_right = column + 1\n neighbor_left = column - 1\n neighors_values = list()\n\n # print(\">>>> Sag. Seq: {}\".format(sag_sequence))\n # print(\">>>> Cor. Seq: {}\".format(cor_sequence))\n # print(\"{} x {}\\n\".format(row, column))\n\n numrows = self.matRegistration.shape[0]\n numcols = self.matRegistration.shape[1]\n # column = self.cor_sequences.index(cor_sequence)\n # row = self.sag_sequences.index(sag_sequence)\n\n if self.matRegistration[row, column] == 0.0:\n # print(\"X: \\n{}\\n\".format(self.matX))\n # print(\"Y: \\n{}\\n\".format(self.matY))\n # print(\"Z: \\n{}\\n\".format(self.matZ))\n # print(\"DL: \\n{}\\n\".format(self.matDL))\n\n if row == 0 and column == 0:\n # print(\"Top left corner\")\n # print(\"Neighbor right: {}\".format(self.matX[row][neighbor_right]))\n # print(\"Neighbor down: {}\\n\".format(self.matX[neighbor_down][column]))\n\n neighors_values.append(self.matDL[row][neighbor_right])\n neighors_values.append(self.matDL[neighbor_down][column])\n # Get only neighbors that value is different from zero\n neighors_values = list(filter(diff_from_zero, neighors_values))\n # Calculates the mean of the values of the neighboring points that are different from zero\n meanDL = int(sum(neighors_values) / len(neighors_values))\n # Coordinate of 2D point to be converted to 3D space\n lpts = [(self.cor_columns[row], meanDL)]\n # print(\"Points: {}\\n\".format(lpts))\n # Converto 2D points to 3D space\n X, Y, Z = self.pc.point3D(plan='Coronal', sequence=cor_sequence, imgnum=1, pts=lpts)\n # print(\"Z: {}\\n\".format(Z))\n # Update the matrices in the position analyzed\n self.matX[row, column] = X[0]\n self.matY[row, column] = Y[0]\n self.matZ[row, column] = Z[0]\n neighors_values = []\n\n '''\n # Z\n neighors_values.append(self.matZ[row][neighbor_right])\n neighors_values.append(self.matZ[neighbor_down][column])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n mean = sum(neighors_values) / len(neighors_values)\n # print(\"Neighbors values: {}\\n\".format(neighors_values))\n # print(\"Mean: {}\\n\".format(mean))\n self.matZ[row][column] = mean\n # print(\"Current position: {}\\n\".format(self.matX[row][column]))\n neighors_values = []\n '''\n\n self.matRegistration[row][column] = self.gray\n\n elif row == 0 and column == (numcols - 1):\n # print(\"Top right corner\")\n # print(\"Neighbor left: {}\".format(self.matX[row][neighbor_left]))\n # print(\"Neighbor down: {}\\n\".format(self.matX[neighbor_down][column]))\n\n neighors_values.append(self.matDL[row][neighbor_left])\n neighors_values.append(self.matDL[neighbor_down][column])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n meanDL = int(sum(neighors_values) / len(neighors_values))\n lpts = [(self.cor_columns[row], meanDL)]\n # print(\"Points: {}\\n\".format(lpts))\n X, Y, Z = 
self.pc.point3D(plan='Coronal', sequence=cor_sequence, imgnum=1, pts=lpts)\n # print(\"Z: {}\\n\".format(Z))\n self.matX[row, column] = X[0]\n self.matY[row, column] = Y[0]\n self.matZ[row, column] = Z[0]\n neighors_values = []\n\n '''\n # Z\n neighors_values.append(self.matZ[row][neighbor_left])\n neighors_values.append(self.matZ[neighbor_down][column])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n mean = sum(neighors_values) / len(neighors_values)\n self.matZ[row][column] = mean\n neighors_values = []\n '''\n\n self.matRegistration[row][column] = self.gray\n\n elif row == (numrows - 1) and column == 0:\n # print(\"Bottom left corner\")\n # print(\"Neighbor right: {}\".format(self.matX[row][neighbor_right]))\n # print(\"Neighbor up: {}\\n\".format(self.matX[neighbor_up][column]))\n\n neighors_values.append(self.matDL[row][neighbor_right])\n neighors_values.append(self.matDL[neighbor_up][column])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n meanDL = int(sum(neighors_values) / len(neighors_values))\n lpts = [(self.cor_columns[row], meanDL)]\n # print(\"Points: {}\\n\".format(lpts))\n X, Y, Z = self.pc.point3D(plan='Coronal', sequence=cor_sequence, imgnum=1, pts=lpts)\n # print(\"Z: {}\\n\".format(Z))\n self.matX[row, column] = X[0]\n self.matY[row, column] = Y[0]\n self.matZ[row, column] = Z[0]\n neighors_values = []\n\n '''\n # Z\n neighors_values.append(self.matZ[row][neighbor_right])\n neighors_values.append(self.matZ[neighbor_up][column])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n mean = sum(neighors_values) / len(neighors_values)\n self.matZ[row][column] = mean\n neighors_values = []\n '''\n\n self.matRegistration[row][column] = self.gray\n\n elif row == (numrows - 1) and column == (numcols - 1):\n # print(\"Bottom right column\")\n # print(\"Neighbor left: {}\".format(self.matX[row][neighbor_left]))\n # print(\"Neighbor up: {}\\n\".format(self.matX[neighbor_up][column]))\n\n neighors_values.append(self.matDL[row][neighbor_left])\n neighors_values.append(self.matDL[neighbor_up][column])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n meanDL = int(sum(neighors_values) / len(neighors_values))\n lpts = [(self.cor_columns[row], meanDL)]\n # print(\"Points: {}\\n\".format(lpts))\n X, Y, Z = self.pc.point3D(plan='Coronal', sequence=cor_sequence, imgnum=1, pts=lpts)\n # print(\"Z: {}\\n\".format(Z))\n self.matX[row, column] = X[0]\n self.matY[row, column] = Y[0]\n self.matZ[row, column] = Z[0]\n neighors_values = []\n\n '''\n # Z\n neighors_values.append(self.matZ[row][neighbor_left])\n neighors_values.append(self.matZ[neighbor_up][column])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n mean = sum(neighors_values) / len(neighors_values)\n self.matZ[row][column] = mean\n neighors_values = []\n '''\n\n self.matRegistration[row][column] = self.gray\n\n elif row == 0 and column > 0 and column < (numcols - 1):\n # print(\"Middle first line\")\n # print(\"Neighbor right: {}\".format(self.matX[row][neighbor_right]))\n # print(\"Neighbor left: {}\".format(self.matX[row][neighbor_left]))\n # print(\"Neighbor down: {}\\n\".format(self.matX[neighbor_down][column]))\n\n neighors_values.append(self.matDL[row][neighbor_right])\n neighors_values.append(self.matDL[row][neighbor_left])\n neighors_values.append(self.matDL[neighbor_down][column])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n meanDL = int(sum(neighors_values) / len(neighors_values))\n lpts = 
[(self.cor_columns[row], meanDL)]\n # print(\"Points: {}\\n\".format(lpts))\n X, Y, Z = self.pc.point3D(plan='Coronal', sequence=cor_sequence, imgnum=1, pts=lpts)\n # print(\"Z: {}\\n\".format(Z))\n self.matX[row, column] = X[0]\n self.matY[row, column] = Y[0]\n self.matZ[row, column] = Z[0]\n neighors_values = []\n\n '''\n # Z\n neighors_values.append(self.matZ[row][neighbor_right])\n neighors_values.append(self.matZ[row][neighbor_left])\n neighors_values.append(self.matZ[neighbor_down][column])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n mean = sum(neighors_values) / len(neighors_values)\n self.matZ[row][column] = mean\n neighors_values = []\n '''\n\n self.matRegistration[row][column] = self.gray\n\n elif row == (numrows - 1) and column > 0 and column < (numcols - 1):\n # print(\"Middle last line\")\n # print(\"Neighbor up: {}\".format(self.matX[neighbor_up][column]))\n # print(\"Neighbor left: {}\".format(self.matX[row][neighbor_left]))\n # print(\"Neighbor right: {}\\n\".format(self.matX[row][neighbor_right]))\n\n # print(\"{} -> {}\\n\".format(self.cor_columns, self.cor_columns[row]))\n neighors_values.append(self.matDL[neighbor_up][column])\n neighors_values.append(self.matDL[row][neighbor_left])\n neighors_values.append(self.matDL[row][neighbor_right])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n meanDL = int(sum(neighors_values) / len(neighors_values))\n lpts = [(self.cor_columns[row], meanDL)]\n # print(\"Points: {}\\n\".format(lpts))\n X, Y, Z = self.pc.point3D(plan='Coronal', sequence=cor_sequence, imgnum=1, pts=lpts)\n # print(\"Z: {}\\n\".format(Z))\n self.matX[row, column] = X[0]\n self.matY[row, column] = Y[0]\n self.matZ[row, column] = Z[0]\n neighors_values = []\n\n '''\n # Z\n neighors_values.append(self.matZ[neighbor_up][column])\n neighors_values.append(self.matZ[row][neighbor_left])\n neighors_values.append(self.matZ[row][neighbor_right])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n mean = sum(neighors_values) / len(neighors_values)\n self.matZ[row][column] = mean\n neighors_values = []\n '''\n\n self.matRegistration[row][column] = self.gray\n\n elif row > 0 and row < (numrows - 1) and column == 0:\n # print(\"Middle first column\")\n # print(\"Neighbor up: {}\".format(self.matX[neighbor_up][column]))\n # print(\"Neighbor down: {}\".format(self.matX[neighbor_down][column]))\n # print(\"Neighbor right: {}\\n\".format(self.matX[row][neighbor_right]))\n\n neighors_values.append(self.matDL[neighbor_up][column])\n neighors_values.append(self.matDL[neighbor_down][column])\n neighors_values.append(self.matDL[row][neighbor_right])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n meanDL = int(sum(neighors_values) / len(neighors_values))\n lpts = [(self.cor_columns[row], meanDL)]\n # print(\"Points: {}\\n\".format(lpts))\n X, Y, Z = self.pc.point3D(plan='Coronal', sequence=cor_sequence, imgnum=1, pts=lpts)\n # print(\"Z: {}\\n\".format(Z))\n self.matX[row, column] = X[0]\n self.matY[row, column] = Y[0]\n self.matZ[row, column] = Z[0]\n neighors_values = []\n\n '''\n # Z\n neighors_values.append(self.matZ[neighbor_up][column])\n neighors_values.append(self.matZ[neighbor_down][column])\n neighors_values.append(self.matZ[row][neighbor_right])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n mean = sum(neighors_values) / len(neighors_values)\n self.matZ[row][column] = mean\n neighors_values = []\n '''\n\n self.matRegistration[row][column] = self.gray\n\n elif row > 0 and row < 
(numrows - 1) and column == (numcols - 1):\n # print(\"Middle last column\")\n # print(\"Neighbor up: {}\".format(self.matX[neighbor_up][column]))\n # print(\"Neighbor down: {}\".format(self.matX[neighbor_down][column]))\n # print(\"Neighbor left: {}\\n\".format(self.matX[row][neighbor_left]))\n\n neighors_values.append(self.matDL[neighbor_up][column])\n neighors_values.append(self.matDL[neighbor_down][column])\n neighors_values.append(self.matDL[row][neighbor_left])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n meanDL = int(sum(neighors_values) / len(neighors_values))\n lpts = [(self.cor_columns[row], meanDL)]\n # print(\"Points: {}\\n\".format(lpts))\n X, Y, Z = self.pc.point3D(plan='Coronal', sequence=cor_sequence, imgnum=1, pts=lpts)\n # print(\"Z: {}\\n\".format(Z))\n self.matX[row, column] = X[0]\n self.matY[row, column] = Y[0]\n self.matZ[row, column] = Z[0]\n neighors_values = []\n\n '''\n # Z\n neighors_values.append(self.matZ[neighbor_up][column])\n neighors_values.append(self.matZ[neighbor_down][column])\n neighors_values.append(self.matZ[row][neighbor_left])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n mean = sum(neighors_values) / len(neighors_values)\n self.matZ[row][column] = mean\n neighors_values = []\n '''\n\n self.matRegistration[row][column] = self.gray\n\n else:\n # print(\"Middle all of it\")\n # print(\"Neighbor up: {}\".format(self.matX[neighbor_up][column]))\n # print(\"Neighbor down: {}\".format(self.matX[neighbor_down][column]))\n # print(\"Neighbor left: {}\".format(self.matX[row][neighbor_left]))\n # print(\"Neighbor right: {}\\n\".format(self.matX[row][neighbor_right]))\n\n # print(\"{} -> {}\\n\".format(self.cor_columns, self.cor_columns[row]))\n neighors_values.append(self.matDL[neighbor_up][column])\n neighors_values.append(self.matDL[neighbor_down][column])\n neighors_values.append(self.matDL[row][neighbor_left])\n neighors_values.append(self.matDL[row][neighbor_right])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n meanDL = int(sum(neighors_values) / len(neighors_values))\n lpts = [(self.cor_columns[row], meanDL)]\n # print(\"Points: {}\\n\".format(lpts))\n X, Y, Z = self.pc.point3D(plan='Coronal', sequence=cor_sequence, imgnum=1, pts=lpts)\n # print(\"Z: {}\\n\".format(Z))\n self.matX[row, column] = X[0]\n self.matY[row, column] = Y[0]\n self.matZ[row, column] = Z[0]\n neighors_values = []\n\n '''\n # Z\n neighors_values.append(self.matZ[neighbor_up][column])\n neighors_values.append(self.matZ[neighbor_down][column])\n neighors_values.append(self.matZ[row][neighbor_left])\n neighors_values.append(self.matZ[row][neighbor_right])\n neighors_values = list(filter(diff_from_zero, neighors_values))\n mean = sum(neighors_values) / len(neighors_values)\n # print(\"Neighbors values: {}\\n\".format(neighors_values))\n # print(\"Mean: {}\\n\".format(mean))\n self.matZ[row][column] = mean\n # print(\"Current position: {}\\n\".format(self.matX[row][column]))\n neighors_values = []\n '''\n\n self.matRegistration[row][column] = self.gray\n\n # c = raw_input(\"?\")", "def mine_neighbor_count(array, game_input, row_length, col_length):\n\tlength = len(game_input)\n\toutput_num = ''\n\tfor x in xrange(length):\n\t\tnum_of_mines = 0\n\t\tposition = x + 1\n\t\trow_num = x / row_length # 0 0 0 1 1 1 2 2 2 3 3 3 4 4 4\n\t\tcol_num = x % row_length # 0 1 2 0 1 2 0 1 2 0 1 2 0 1 2\n\t\tif game_input[x] == \"*\":\n\t\t\toutput_num += \"*\"\n\t\t\tcontinue\n\t\tif col_num > 0:\n\t\t\t# left\n\t\t\tif 
array[row_num][0][col_num - 1] == \"*\":\tnum_of_mines += 1\n\t\tif col_num < (row_length - 1):\n\t\t\t# right\n\t\t\tif array[row_num][0][col_num + 1] == \"*\":\tnum_of_mines += 1\n\t\tif row_num > 0:\n\t\t\t# deals with top of the array\n\t\t\tif array[row_num - 1][0][col_num] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num > 0:\n\t\t\t\t#top left\n\t\t\t\tif array[row_num - 1][0][col_num - 1] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num < row_length - 1:\n\t\t\t\t# top right\n\t\t\t\tif array[row_num - 1][0][col_num + 1] == \"*\":\tnum_of_mines += 1\n\t\tif row_num < col_length - 1:\n\t\t\t# deals with bottom of the array\n\t\t\tif array[row_num + 1][0][col_num] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num > 0:\n\t\t\t\t# bottom left\n\t\t\t\tif array[row_num + 1][0][col_num - 1] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num < row_length - 1:\n\t\t\t\t# bottom right\n\t\t\t\tif array[row_num + 1][0][col_num + 1] == \"*\":\tnum_of_mines += 1\n\t\toutput_num += str(num_of_mines)\n\treturn output_num", "def footprint_corner_indices():", "def get_further_neighbours(self, cell):\n\t\tneighs = self.get_neighbours(cell)\n\t\ti, j = cell.find_id()\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tx, y = neigh.find_id()\n\t\t\tif abs(x-i)+abs(y-j) > 1 or abs(x-i)+abs(y-j) == 0: \n\t\t\t\tneighbours.append(self.space[y,x])\n\t\treturn neighbours", "def neighbor(self, position):\n if self.options['PeriOpt']:\n if position[0] == 0:\n yield (self.size[0] - 1, position[1]), 0\n elif position[0] == self.size[0] - 1:\n yield (0, position[1]), 1\n if position[1] == 0:\n yield (position[0], self.size[1] - 1), 2\n elif position[1] == self.size[1] - 1:\n yield (position[0], 0), 3\n\n if position[0] > 0:\n yield (position[0] - 1, position[1]), 0\n if position[0] < self.size[0] - 1:\n yield (position[0] + 1, position[1]), 1\n if position[1] > 0:\n yield (position[0], position[1] - 1), 2\n if position[1] < self.size[1] - 1:\n yield (position[0], position[1] + 1), 3", "def next_life_generation(a):\n w = len(a[0])\n h = len(a)\n new_a = create_board(w, h)\n\n for n in range(h):\n for m in range(w):\n if 0 < n < h - 1 and 0 < m < w - 1:\n count = count_neighbours(n, m, a)\n if count < 2 or count > 3:\n new_a [n][m] = 0\n elif count == 3:\n new_a[n][m] =1\n else:\n new_a[n][m] = a[n][m]\n else:\n new_a[n][m] = 0\n \n return new_a", "def make_mammalian_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _terminal = _neuraminic + \\\n \" + max(%s - %%d, 0)\" % FrozenMonosaccharideResidue.from_iupac_lite(\"Hex\")\n _hexose = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_terminal_groups = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if spec == 'hybrid':\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, base_terminal_groups) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n 
rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n (_neuraminic), 1, base_terminal_groups + i\n ) & CompositionExpressionRule(\n \"(Hex > %d) & (Hex < (%d - (NeuAc + NeuGc)))\" % (base_hexnac + i - 2, base_hexnac + (2 * i) + 3))\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + (2 * i) + 3)\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods", "def neighbors(i: int, j: int) -> np.array:\n k, l = (i//3)*3, (j//3)*3 # noqa E741\n return np.array([\n np.r_[i:i:8j, 0:i, i + 1:9, np.repeat(np.r_[k:i, i + 1:k + 3], 2)],\n np.r_[0:j, j + 1:9, j:j:8j, np.tile(np.r_[l:j, j + 1:l + 3], 2)],\n ], dtype=np.uint8)", "def neighbors8(point):\n x, y = point\n return ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1),\n (x + 1, y + 1), (x - 1, y - 1), (x + 1, y - 1), (x - 1, y + 1))", "def get_neighbours(self):\n return []", "def indapproxpattern(pattern, string, nummismatch):\n\n indarr = []\n# substringarr = []\n numchars = len(pattern)\n\n for i in xrange(0, len(string) - numchars + 1):\n \n substring = patterncount.subtext(string, i, numchars)\n \n if hammingdist(pattern, substring) <= nummismatch:\n \n indarr.append(i)\n# substringarr.append(substring)\n \n return indarr", "def get_neighbours(self, row, col):\n neighbour_location_diffs = [(-1, -1),\n ( 0, -1),\n ( 1, -1),\n ( 1, 0),\n ( 1, 1),\n ( 0, 1),\n (-1, 1),\n (-1, 0)]\n neighbours = []\n for diff in neighbour_location_diffs:\n if (row + diff[0] >= 0 and\n row + diff[0] < self.height and\n col + diff[1] >= 0 and\n col + diff[1] < self.width):\n neighbours.append(self.cells[row + diff[0]][col + diff[1]])\n return neighbours", "def cycle(start, times):\n current_gen = start\n for _ in range(times):\n next_gen = defaultdict(int)\n all_locs = get_all_neighbors(current_gen.keys())\n all_locs.update(current_gen.keys())\n for loc in all_locs:\n neighbors = get_neighbors(loc)\n count = sum(current_gen[n] for n in neighbors)\n if count in (2, 3) and current_gen[loc] == 1:\n next_gen[loc] = 1\n elif count == 3 and current_gen[loc] == 0:\n next_gen[loc] = 1\n current_gen = next_gen\n return current_gen", "def random_walker_generator(rows, cols, negative=False):\n attempts = 0\n while True:\n steps = 0\n found_goal = False\n grid = np.zeros((rows, cols))\n # start on bottom row\n current = (rows - 1, random.randint(0, cols - 1))\n grid[current] = 1\n steps += 1\n visited = set(current)\n\n connection = 0\n\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n while len(neighbors) > 0:\n for (neigh_x, neigh_y) in set(neighbors):\n # lookahead for neighbors neighbors\n lookahead = get_neighbors(\n (neigh_x, neigh_y), grid, visited, similar_cells={1})\n if len(lookahead) < 3: # contains neighbors with 1's\n # edge cases\n if neigh_x == 0 and random.random() >= 0.25:\n # chance of reaching goal at top\n continue\n elif ((neigh_y == 0 or neigh_y == rows - 1) and\n len(lookahead) == 2):\n continue\n else:\n neighbors.remove((neigh_x, neigh_y))\n\n if len(neighbors) == 0:\n # print (\"no more neighbors to pick\")\n break\n\n # time.sleep(0.15)\n # os.system(\"clear\")\n # draw_grid(grid)\n\n current = random.sample(neighbors, 1)[0] # pick a random 
neighbor\n # print (\"selected: \", current)\n grid[current] = 1\n steps += 1\n visited.add(current)\n if current[0] == 0: # top row\n # print (\"top row reached\")\n found_goal = True\n break\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n\n if (found_goal and not negative) or (not found_goal and negative):\n # print (\"Succeeded after %d attempts\" % attempts)\n attempts = 0\n grid = apply_noise(grid)\n\n # hack\n # override above step counter, because the random noise\n # might have added more, shorter connections\n # we do this because network was picking up patterns\n # from making random noise not entirely random\n steps, connected = check_connections_length(grid)\n if connected and negative:\n continue\n\n # randomly flip grid upside down\n if random.random() <= 0.5:\n grid = np.flipud(grid)\n\n yield grid, steps, connected\n else:\n attempts += 1", "def make_pattern(pixels, origin, pattern_size, ndots):\n\tw,h = pattern_size\n\tow,oh = origin\n\tcoordinates = itertools.product(range(h), range(w))\n\twith_offset = [(c+ow, r+oh) for r,c in coordinates]\n\t# take only n dots\n\twith_offset = with_offset[:ndots]\n\tfor c,r in with_offset:\n\t\tpixels[c, r] = 0", "def checkNeighbours(data):\n features = 0\n background = 0\n neighbours = [data[0,0],data[0,1],data[0,2],data[1,2],data[2,2],data[2,1],data[2,0],data[1,0]]\n fourConnected = False\n lastPoint = neighbours[-1] #Needed for checking a complete transition cycle\n for n in neighbours:\n if not n:\n features += 1\n elif fourConnected:\n background += 1\n\n fourConnected = not fourConnected\n lastPoint = n\n\n for pos,corner in enumerate(corners):\n if numpy.alltrue(data == corner):\n cornerPos = pos+1\n break\n else:\n cornerPos = 0\n return (features,background,cornerPos)", "def neighbours(self, i, j):\n nearest = []\n for x_offset, y_offset in [(0, -1), (0, 1), (1, 0), (-1, 0)]:\n try:\n nearest.append(self.as_list[checkNonNegIndex(i + x_offset)][checkNonNegIndex(j + y_offset)])\n except IndexError:\n continue\n except TypeError:\n continue\n return nearest", "def get_moore_neighbor_info(self, i, j, cell_info) -> dict:\n neighbor_info = []\n for a in range(-1, 2):\n for b in range(-1, 2):\n if not (a == b == 0): \n neighbor_info.append(cell_info[i + a][j + b])\n return neighbor_info", "def test_get_neighbours(self):\n data = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n board = Board(data)\n\n # [pos(x, y), #neighbours]\n posx_posy_n = [[0, 0, 2], [0, 1, 3], [0, 2, 2],\n [1, 0, 3], [1, 1, 4], [1, 2, 3],\n [2, 0, 2], [2, 1, 3], [2, 2, 2]]\n for x, y, n in posx_posy_n:\n neighbours = [i for i in board.get_neighbours(x, y)]\n self.assertEquals(len(neighbours), n)", "def get_test_pattern(img_size=(2048, 2048)):\n ny, nx = img_size\n # mask = np.zeros((ny, nx))\n\n # patterns with variable spacing\n periods = range(2, 20, 2)\n # vcounter = 0\n for ii, p in enumerate(periods):\n cell = np.zeros((p, nx))\n on_pix = int(np.ceil(p / 2))\n cell[:on_pix, :] = 1\n cell = np.tile(cell, [4, 1])\n\n if ii == 0:\n mask = cell\n else:\n mask = np.concatenate((mask, cell), axis=0)\n\n mask = mask[:, :mask.shape[0]]\n\n mask_block = np.concatenate((mask, np.rot90(mask)), axis=1)\n mask_block2 = np.concatenate((np.rot90(mask), mask), axis=1)\n\n mask_superblock = np.concatenate((mask_block, mask_block2))\n\n ny_reps = int(np.ceil(ny / mask_superblock.shape[0]))\n nx_reps = int(np.ceil(nx / mask_superblock.shape[1]))\n mask = np.tile(mask_superblock, [ny_reps, nx_reps])\n mask = mask[0:ny, 0:nx]\n\n return mask", "def 
neighbor(board, x, y, n, m):\n deltas = (\n (-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1),\n )\n count = 0\n for dx, dy in deltas:\n xx = x + dx\n yy = y + dy\n if xx >= 0 and xx < n and yy >= 0 and yy < m and board[xx][yy] % 2 == 1:\n count += 1\n\n return count", "def neighbours_of_position(coords):\n row = coords[0]\n col = coords[1]\n \n #assign each of neighbours corrds\n #top left to top rigt\n top_left = (row - 1, col - 1)\n top_center = (row - 1, col)\n top_right = (row - 1, col + 1)\n \n # left to right (center)\n left = (row, col - 1)\n # the (row, col) cordinates passed into this function are situated here\n right = (row, col + 1)\n \n #bottom-left to bottom-right\n bottom_left = (row +1, col -1)\n bottom_center = (row +1, col)\n bottom_right = (row +1, col +1)\n \n return [top_left, top_center, top_right,\n left , right ,\n bottom_left, bottom_center, bottom_right]", "def neighbors(i , j) :\n ns = []\n # vector de direction\n dx = [+1, +1, 0, 1]\n dy = [0, +1, 1, -1]\n for d in range(4) :\n ns.append((i + dx[d], j + dy[d]))\n #remove neagative element\n ns = [i for i in ns if i[0] >= 0 and i[1] >= 0]\n return ns", "def generate_nearby_cells(self):\n for y in range(len(self.island_map)):\n for x in range(len(self.island_map[y])):\n list_of_nearby_cells = []\n\n if y != 0:\n self.generate_cell_above(x, y, list_of_nearby_cells)\n\n if x != 0:\n self.generate_cell_left(x, y, list_of_nearby_cells)\n\n if y != len(self.island_map)-1:\n self.generate_cell_below(x, y, list_of_nearby_cells)\n\n if x != len(self.island_map[y])-1:\n self.generate_cell_right(x, y, list_of_nearby_cells)\n\n self.island_map[y][x].nearby_cells = list_of_nearby_cells", "def test_neighbor():\n UP = (0, -1)\n LEFT = (-1, 0)\n DOWN_RIGHT = (1, 1)\n HERE = (0, 0)\n\n c1 = Cell(2, 6, 100)\n c1_up = c1.neighbor(UP)\n c1_left = c1.neighbor(LEFT)\n c1_down_right = c1.neighbor(DOWN_RIGHT)\n c1_here = c1.neighbor(HERE)\n\n assert c1_up == (2, 5)\n assert c1_up[0] == 2\n assert c1_left == (1, 6)\n assert c1_left[1] == 6\n assert c1_down_right == (3, 7)\n assert c1_here == (2, 6)\n\n c2 = Cell(4, 2, 200)\n c2_up = c2.neighbor(UP)\n c2_left = c2.neighbor(LEFT)\n c2_down_right = c2.neighbor(DOWN_RIGHT)\n c2_here = c2.neighbor(HERE)\n\n assert c2_up == (4, 1)\n assert c2_left == (3, 2)\n assert c2_down_right == (5, 3)\n assert c2_here == (4, 2)", "def chromosome_to_cycle(chromosome):\n nodes = [0] * (len(chromosome) * 2 + 1)\n chromo = [0] + chromosome.copy()\n for j in range(1, len(chromo)):\n i = chromo[j]\n if i > 0:\n nodes[2 * j - 1] = 2 * i - 1\n nodes[2 * j] = 2 * i\n else:\n nodes[2 * j - 1] = -2 * i\n nodes[2 * j] = -2 * i - 1\n return nodes[1:]", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A" ]
[ "0.6630292", "0.6147133", "0.61121386", "0.6109716", "0.6099266", "0.60585225", "0.60353184", "0.5980777", "0.59600437", "0.5955379", "0.58933324", "0.5820584", "0.5806383", "0.5797996", "0.5792484", "0.5790195", "0.57679236", "0.5754327", "0.5728205", "0.5717146", "0.5706085", "0.570483", "0.56959444", "0.56939876", "0.5687913", "0.5687054", "0.5681787", "0.5659362", "0.5639083", "0.5631543", "0.56272215", "0.562653", "0.5608891", "0.5607642", "0.5602304", "0.55547076", "0.5547365", "0.5535609", "0.5506445", "0.5498271", "0.54724383", "0.5463622", "0.5463622", "0.5462552", "0.5458439", "0.5455606", "0.54502594", "0.54369414", "0.5429639", "0.5429552", "0.5426932", "0.5414496", "0.5409747", "0.5404867", "0.5390412", "0.5390119", "0.53875047", "0.5360768", "0.53527075", "0.53479344", "0.5339143", "0.5329275", "0.5329012", "0.53273493", "0.53225005", "0.5321644", "0.53134954", "0.5313013", "0.5302731", "0.53004336", "0.5288424", "0.52780473", "0.5276104", "0.5275671", "0.5272261", "0.5270824", "0.5260488", "0.5252026", "0.52429307", "0.5236743", "0.52318937", "0.52297217", "0.5221172", "0.52109545", "0.52086884", "0.5207739", "0.52056885", "0.52047527", "0.52042896", "0.51971537", "0.51901025", "0.51751345", "0.5170096", "0.5164644", "0.5162908", "0.51606166", "0.5156514", "0.5153174", "0.5147508", "0.51471037" ]
0.7541577
0
Generate neighbours for the given pattern (genome string)
def generate_neighbours(pattern: str, mismatches: int) -> set:
    neighbourhood = set()
    neighbourhood.add(pattern)

    curr_patterns = [pattern]
    next_patterns = []
    for curr_mismatches in range(mismatches):
        for curr_pattern in curr_patterns:
            for neighbour in _generate_immediate_neighbours(curr_pattern):
                if neighbour not in neighbourhood:
                    neighbourhood.add(neighbour)
                    next_patterns.append(neighbour)

        curr_patterns = next_patterns
        next_patterns = []

    return neighbourhood
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_immediate_neighbours(pattern: str) -> list:\n generated = []\n for i in range(len(pattern)):\n if pattern[i] == 'A':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_A])\n elif pattern[i] == 'C':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_C])\n elif pattern[i] == 'T':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_T])\n elif pattern[i] == 'G':\n generated.extend([pattern[:i] + c + pattern[i + 1:] for c in LIST_G])\n\n return generated", "def neighbors(pattern, d):\n\n if d == 0:\n return [pattern]\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n neighborhood = []\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n for text in suffix_neighbors:\n hdist = compute_hamming_distance(suffix_pattern, text)\n if hdist < d:\n for n in ['A', 'C', 'G', 'T']:\n neighbor = n + text\n neighborhood.append(neighbor)\n else:\n neighbor = pattern[0] + text\n neighborhood.append(neighbor)\n return neighborhood", "def neighbors(pattern, d):\n if d == 0:\n return pattern\n\n if len(pattern) == 1:\n return ['A', 'C', 'G', 'T']\n\n neighborhood = []\n\n # ##########\n # We can use recursion to successively compute neighbors(suffix(pattern), d),\n # where suffix(pattern) = pattern[1:]\n #\n # The reason being: if we have neighbors(suffix(pattern, d)), then we know\n # that the Hamming Distance between `pattern` and `suffix(pattern)` is either equal\n # to d or less than d.\n #\n # In the first case, we can add `pattern[0]` to the beginning of\n # `suffix(pattern)` in order to obtain a k-mer belonging to\n # Neighbors(Pattern, d). In the second case, we can add any symbol\n # to the beginning of `suffix(pattern)` and obtain a k-mer belonging\n # to Neighbors(Pattern, d).\n # ##########\n\n suffix_pattern = pattern[1:]\n suffix_neighbors = neighbors(suffix_pattern, d)\n\n for i in range(len(suffix_neighbors)):\n\n neighboring_pattern_text = suffix_neighbors[i]\n\n if hamming_distance(suffix_pattern, neighboring_pattern_text) < d:\n for n in _NUCLEOTIDES:\n neighborhood.append(n + neighboring_pattern_text)\n\n else:\n neighborhood.append(pattern[0] + neighboring_pattern_text)\n\n return neighborhood", "def neighbors(pattern, d):\n tides = set([\"A\", \"C\", \"G\", \"T\"])\n if d == 0:\n return set([pattern])\n if len(pattern) == 1:\n return tides\n neighborhood = set([])\n suffix_neighbors = neighbors(pattern[1:], d)\n for text in suffix_neighbors:\n if ham_dist(pattern[1:], text) < d:\n for tide in tides:\n neighborhood.add(tide + text)\n else:\n neighborhood.add(pattern[0] + text)\n return neighborhood", "def _get_neighbors(cls, pattern: str, max_distance: int) -> List[str]:\n return get_neighborhood(pattern, ''.join(cls.nucleobases.keys()), max_distance)", "def _get_neighbours(kmer):\n assert (is_dna(kmer))\n bases = 'ACTG'\n result = set()\n for i in range(len(kmer)):\n for base in bases:\n result.add(kmer[:i] + base + kmer[(i + 1):])\n return result", "def get_neighbors(pattern, d):\n # if no difference\n if d == 0:\n return [pattern]\n # if no pattern\n if len(pattern) == 1:\n return ['A', 'C', 'T', 'G']\n # initialize the container\n neighborhood = set()\n # checking for the suffix patterns\n neighbors = get_neighbors(pattern[1:], d)\n # iterates through the neighbors\n for kmer in neighbors:\n # check for the allowed distance\n if hamming_distance(pattern[1:], kmer) < d:\n # iterates through the charcater/bases\n for char in ['A', 'C', 'T', 'G']:\n # add the character to the suffix 
payyern\n neighborhood.add(char + kmer)\n else:\n # otherwise add the first character again\n neighborhood.add(pattern[0] + kmer)\n return sorted(list(neighborhood))", "def _get_neighbours(self, pos, input_data):\r\n neighbours = []\r\n\r\n start = AlignmentOutputData.table_values[pos.y][pos.x]\r\n diagonal = float(strings.NAN)\r\n up = float(strings.NAN)\r\n left = float(strings.NAN)\r\n\r\n cur_char_seq_1 = strings.EMPTY\r\n cur_char_seq_2 = strings.EMPTY\r\n\r\n if pos.y - 1 >= 0 and pos.x - 1 >= 0:\r\n diagonal = AlignmentOutputData.table_values[pos.y - 1][pos.x - 1]\r\n\r\n if pos.y - 1 >= 0:\r\n up = AlignmentOutputData.table_values[pos.y - 1][pos.x]\r\n\r\n if pos.x - 1 >= 0:\r\n left = AlignmentOutputData.table_values[pos.y][pos.x - 1]\r\n\r\n if pos.y - 1 >= 0:\r\n cur_char_seq_1 = input_data.sequence_a[pos.y - 1]\r\n if pos.x - 1 >= 0:\r\n cur_char_seq_2 = input_data.sequence_b[pos.x - 1]\r\n\r\n matching = start == diagonal + input_data.cost_function.get_value(cur_char_seq_1, cur_char_seq_2)\r\n deletion = start == up + input_data.gap_cost\r\n insertion = start == left + input_data.gap_cost\r\n\r\n if matching:\r\n neighbours.append(Vector(pos.x - 1, pos.y - 1))\r\n\r\n if insertion:\r\n neighbours.append(Vector(pos.x - 1, pos.y))\r\n\r\n if deletion:\r\n neighbours.append(Vector(pos.x, pos.y - 1))\r\n\r\n return neighbours", "def indapproxpattern(pattern, string, nummismatch):\n\n indarr = []\n# substringarr = []\n numchars = len(pattern)\n\n for i in xrange(0, len(string) - numchars + 1):\n \n substring = patterncount.subtext(string, i, numchars)\n \n if hammingdist(pattern, substring) <= nummismatch:\n \n indarr.append(i)\n# substringarr.append(substring)\n \n return indarr", "def neighbors_generator(state: str, nurses_number=10) -> str:\n\n genes = 21 * nurses_number\n\n # Random index to change and generated the neighbor\n index = randrange(0, genes)\n\n # Here we're taking the first part of the state before the bit that will be modified\n new_state = state[0:index]\n\n # Here is modified the bit\n if state[index] == '0':\n new_state += '1'\n else:\n new_state += '0'\n\n # Here we're taking the last part of the state passed\n new_state += state[index+1:]\n\n # Here is returned the new state and the next bit to be modified\n return new_state", "def find_neighbours(engine, field, features):\n code = CodeSegment(engine)\n N = len(engine.q)\n Nf = 3 ** engine.pm.ndim\n code.assign(x=Literal(numpy.zeros((N, Nf))), y='features')\n grid = engine.pm.generate_uniform_particle_grid(shift=0)\n for i in range(Nf):\n ii = i\n a = []\n for d in range(engine.pm.ndim):\n a.append(ii % 3 - 1)\n ii //= 3\n\n grid1 = grid + numpy.array(a[::-1]) * (engine.pm.BoxSize / engine.pm.Nmesh)\n layout = engine.pm.decompose(grid1)\n code.readout(x=Literal(grid1), mesh='field', value='feature1', layout=Literal(layout), resampler='nearest')\n code.assign_component(attribute='features', value='feature1', dim=i)\n return code", "def neighbours(num):\n num = str(num)\n num = '0'*(4-len(num))+num # Prepend 0 until length is 4\n\n return [\n int(add_wo_carry(num, '0001')),\n int(add_wo_carry(num, '0010')),\n int(add_wo_carry(num, '0100')),\n int(add_wo_carry(num, '1000')),\n int(sub_wo_carry(num, '0001')),\n int(sub_wo_carry(num, '0010')),\n int(sub_wo_carry(num, '0100')),\n int(sub_wo_carry(num, '1000'))]", "def find_pattern(pattern, genome):\n\n tens_table = [pow(10, m) for m in xrange(len(pattern))]\n hash_pattern = get_hash(pattern, tens_table)\n index = []\n for current_index in xrange(len(genome) - 
len(pattern) + 1):\n\t\tif current_index == 0:\n\t\t\tcurrent_hash = get_hash(genome[0:len(pattern)], tens_table)\n\t\telse:\n\t\t\tcurrent_hash = ((current_hash - (nucleotide_value_map[genome[current_index-1]] * tens_table[len(pattern)-1])) * 10 + nucleotide_value_map[genome[current_index-1+len(pattern)]])\n if current_hash == hash_pattern:\n index.append(current_index)\n return index", "def motif_enumeration(dna, k, d):\n\n patterns = []\n neighborhoods = []\n for dna_string in dna:\n len_dna_string = len(dna_string)\n neighborhood = []\n for i in range(len_dna_string - k + 1):\n pattern = dna_string[i:i + k]\n dna_neighbors = neighbors(pattern, d)\n neighborhood = neighborhood + dna_neighbors\n neighborhoods.append(neighborhood)\n for n in neighborhoods[0]:\n count = 0\n for i in range(1, len(neighborhoods)):\n if n in neighborhoods[i]:\n count = count + 1\n if count == len(neighborhoods) - 1 and n not in patterns:\n patterns.append(n)\n return patterns", "def compute_pattern(n):\n for x in range(1,n):\n for y in range(x, x*2):\n print(y, end= \" \")\n print()", "def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())", "def Neighbourgs(abcd, h):\n\n Nelem = len(abcd)\n\n a = abcd[h][0]\n b = abcd[h][1]\n c = abcd[h][2]\n d = abcd[h][3]\n\n el1, el2, el3, el4 = 0, 0, 0, 0\n\n N = 0\n\n for j in range(0, Nelem - 1):\n\n if N == 4:\n break\n\n if a in abcd[j, :] and b in abcd[j, :] and j != h:\n N += 1\n el1 = j + 1\n\n if b in abcd[j, :] and c in abcd[j, :] and j != h:\n N += 1\n el2 = j + 1\n\n if c in abcd[j, :] and d in abcd[j, :] and j != h:\n N += 1\n el3 = j + 1\n\n if d in abcd[j, :] and a in abcd[j, :] and j != h:\n N += 1\n el4 = j + 1\n\n return [el1, el2, el3, el4]", "def getNeighbours(seg,meta,inversedIndex):\n return np.unique(np.fromiter( (inversedIndex[x] for x in np.concatenate([meta.loc[seg]['ins'],meta.loc[seg]['outs']])),dtype=np.int))", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def mapNighbours(self,name):\n \twith open(name, 'r', encoding='utf-8') as f:\n for line in f:\n li = line.split()\n item=li[0]\n nighbs=li[1:]\n self.NighboursMap[item]=nighbs", "def test_split_adds_known_neighbours(mock_amg):\n\n mock_amg.cells[4].split()\n # bl\n assert mock_amg.cells[-4].north is mock_amg.cells[-2]\n assert mock_amg.cells[-4].east is mock_amg.cells[-3]\n\n # br\n assert mock_amg.cells[-3].north is mock_amg.cells[-1]\n assert mock_amg.cells[-3].west is mock_amg.cells[-4]\n\n # tl\n assert mock_amg.cells[-2].south is mock_amg.cells[-4]\n assert mock_amg.cells[-2].east is mock_amg.cells[-1]\n\n # tr\n assert mock_amg.cells[-1].south is mock_amg.cells[-3]\n assert mock_amg.cells[-1].west is mock_amg.cells[-2]", "def make_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _hexose = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & 
CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_neuac = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if i == 0:\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, base_neuac) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 1, base_neuac + i\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 2)\n\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods", "def neighbours(x, y):\n n = []\n for c in ((y-1, x-1), (y-1, x), (y-1, x+1), (y, x-1), (y, x+1), (y+1, x-1), (y+1, x), (y+1, x+1)):\n n.append(c)\n return n", "def get_neighbours_8(x, y):\n return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1), \\\n (x - 1, y), (x + 1, y), \\\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def get_neighbour(self, y, x):\n if [y, x] in self.mine_locations:\n return Minesweeper.BOMB\n count = 0\n # (x-1, y-1), (x, y-1), (x+1, y-1),\n # (x-1, y), (x, y), (x+1, y),\n # (x-1, y+1), (x, y+1), (x+1, y+1)\n for xe in range(x - 1, x + 2):\n for ye in range(y - 1, y + 2):\n if [ye, xe] in self.mine_locations:\n count += 1\n return str(count)", "def search(self, data, classes):\n self.patterns = []\n c = 0\n time_d=0\n for seed in self.seeds:\n #print seed\n c += 1\n if self.verbose and c%100 == 0: \n print \"Searching with seed %s\" % str(seed)\n print np.mean(time_d) \n time_d = 0\n\n pattern = self.search_method.create_pattern(data, seed)\n pattern.evaluate(data, self.metric, classes)\n st=time.clock()\n while True:\n next_pattern = max(pattern.expand(self.network,self.radius),\n key=lambda ex: ex.evaluate(data, self.metric, classes)) \n if (next_pattern.score / pattern.score) > 1+self.min_improve:\n pattern = next_pattern\n # print \"zlepseni\",pattern.score\n else: \n break\n #pattern.edges = filter_edges(pattern.edges, pattern.genes)\n time_d += time.clock()-st\n if self.trivial_patterns or len(list(seed)[0]) > 2:\n self.patterns += [pattern] \n # check_dir(self.base_dir + 'greedy_search_pics/')\n # if self.draw:\n # gene_color = dict()\n # for gene in pattern.genes:\n # edges_names = set((self.gene_names[h1], self.gene_names[h2]) for (h1, h2) in pattern.edges)\n # # a function to color a gene in discovered pattern\n # gene_color[self.gene_names[gene]] = scipy.stats.ttest_ind(data[:,-1], GE_profile = data[:,gene])\n # print \"Drawing a graph for seed %s\" % str(seed)\n # draw_graph(edges_names, self.base_dir + 'greedy_search_pics/test-graph-greedy', seed)\n\n # 
if seed > 550:\n # break\n\n return self.patterns", "def make_pattern(pixels, origin, pattern_size, ndots):\n\tw,h = pattern_size\n\tow,oh = origin\n\tcoordinates = itertools.product(range(h), range(w))\n\twith_offset = [(c+ow, r+oh) for r,c in coordinates]\n\t# take only n dots\n\twith_offset = with_offset[:ndots]\n\tfor c,r in with_offset:\n\t\tpixels[c, r] = 0", "def expand2(self, network, radius):\n expansion = []\n neighbors = frozenset().union(*[set(network.getrow(g).indices) for g in self.genes]) \n for neighb in neighbors: \n if neighb in self.genes:\n continue\n dist_from_pattern = self.dist[network.getrow(neighb).indices] \n dist_of_added = dist_from_pattern[dist_from_pattern > -1].min() + 1\n if dist_of_added > radius:\n continue\n \n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n #next_pattern.edges.add((pred, neighb))\n next_pattern.added = neighb \n next_pattern.dist[neighb] = dist_of_added\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]", "def _get_neighbours(self, position):\n grid = self._grid\n x, y = position\n neighbours = []\n offsets = [(0,1),(1,0),(0,-1),(-1,0)]\n shuffle(offsets)\n for offset in offsets:\n i, j = offset\n position = (x + i, y + j)\n if grid.valid_position(position) and position not in self.shots:\n neighbours.append(position)\n return neighbours", "def mine_neighbor_count(array, game_input, row_length, col_length):\n\tlength = len(game_input)\n\toutput_num = ''\n\tfor x in xrange(length):\n\t\tnum_of_mines = 0\n\t\tposition = x + 1\n\t\trow_num = x / row_length # 0 0 0 1 1 1 2 2 2 3 3 3 4 4 4\n\t\tcol_num = x % row_length # 0 1 2 0 1 2 0 1 2 0 1 2 0 1 2\n\t\tif game_input[x] == \"*\":\n\t\t\toutput_num += \"*\"\n\t\t\tcontinue\n\t\tif col_num > 0:\n\t\t\t# left\n\t\t\tif array[row_num][0][col_num - 1] == \"*\":\tnum_of_mines += 1\n\t\tif col_num < (row_length - 1):\n\t\t\t# right\n\t\t\tif array[row_num][0][col_num + 1] == \"*\":\tnum_of_mines += 1\n\t\tif row_num > 0:\n\t\t\t# deals with top of the array\n\t\t\tif array[row_num - 1][0][col_num] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num > 0:\n\t\t\t\t#top left\n\t\t\t\tif array[row_num - 1][0][col_num - 1] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num < row_length - 1:\n\t\t\t\t# top right\n\t\t\t\tif array[row_num - 1][0][col_num + 1] == \"*\":\tnum_of_mines += 1\n\t\tif row_num < col_length - 1:\n\t\t\t# deals with bottom of the array\n\t\t\tif array[row_num + 1][0][col_num] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num > 0:\n\t\t\t\t# bottom left\n\t\t\t\tif array[row_num + 1][0][col_num - 1] == \"*\":\tnum_of_mines += 1\n\t\t\tif col_num < row_length - 1:\n\t\t\t\t# bottom right\n\t\t\t\tif array[row_num + 1][0][col_num + 1] == \"*\":\tnum_of_mines += 1\n\t\toutput_num += str(num_of_mines)\n\treturn output_num", "def get_neighbours(self, cell: Position) -> Iterable[Position]:\n x, y = cell\n\n return [\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n ]", "def neighbours(number: int, number_sectors: int) -> [int, int, int, int]:\n col = number % number_sectors\n row = number // number_sectors\n\n nieg = [number - number_sectors, number + number_sectors, number - 1, number + 1]\n\n if row == 0:\n nieg[0] = -1\n if row == number_sectors - 1:\n nieg[1] = -1\n if col == 0:\n nieg[2] = -1\n if col == number_sectors - 1:\n nieg[3] = -1\n return nieg", "def test_make_neighbors(position):\n\n def result_row(i, size):\n return [i] + [i + 1] * (size - 2) + [i]\n\n size = 
position.size\n neigh_counts = [0] * (size ** 2)\n first_row = result_row(2, size)\n last_row = result_row(2, size)\n middle_row = result_row(3, size)\n desired_result = first_row + (middle_row) * (size - 2) + last_row\n\n for c, neighs in go.make_neighbors(size=size):\n for pt in list(neighs):\n neigh_counts[pt] += 1\n\n assert desired_result == neigh_counts", "def get_neighbours(lat, long):\n # ns = north east, ew = east west (ratio between 1 feet and degree) \n # its different on diferent places on earth (sphere)!!\n ns = 0.0025\n ew = 0.0025\n walk = []\n for i in range(-2, 3):\n for j in range(-2, 3):\n thiscell = CellId.from_lat_lng(LatLng.from_degrees(lat + ns*i, long + ew*j)).parent(S2_CELL_LEVEL)\n if abs(i * j) < 4:\n walk.append(thiscell.id())\n return sorted(walk)", "def neighbour(seq):\n it = iter(seq)\n it_next = itertools.islice(itertools.chain(iter(seq), [None]), 1, None)\n\n prev = None\n for curr, next in zip(it, it_next):\n yield(prev, curr, next)\n prev = curr", "def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;", "def neighbours(input_configuration, position):\n\n row_pos, seat_pos = position\n return [(check_row, check_seat)\n for check_row in range (row_pos-1, row_pos + 2) for check_seat in range (seat_pos-1, seat_pos+2)\n if (check_row != row_pos or check_seat != seat_pos)\n and (check_row, check_seat) in input_configuration.keys()]", "def construct_network_from_neighbours_list(related_characters: list):\n graph = nx.Graph()\n for edge in related_characters:\n sentiment = edge[1]\n color = ''\n if sentiment == 'Positive':\n color = 'g'\n elif sentiment == 'Negative':\n color = 'r'\n elif sentiment == 'Neutral':\n color = 'k'\n # graph.add_node(edge[0][0], popularity=\n graph.add_edge(edge[0][0], edge[0][1], color=color, weight=edge[2])\n\n return graph", "def neighbours(indexing, random_stream=None):\n\n # pre-compute some necessary values\n counts = compute_index_counts(indexing)\n binary_sm = compute_binary_set_mappings(indexing, counts)\n unary_sm = compute_unary_set_mappings(indexing, counts)\n empty = find_empty(counts)\n image = [idx for idx,count in enumerate(counts) if count != 0]\n \n def candidates(vertex, index, image, binary_sm, unary_sm, counts, empty):\n \"\"\"generates the set of possible target indices for a given vertex\n\n :param vertex: the vertex\n :type vertex: int\n :param index: the current index of the vertex\n :type index: int\n :param image: the image of the current indexing\n :type image: list\n :param binary_sm: result of `compute_binary_set_mappings`\n :type binary_sm: np.array[n,dtype=int]\n :param unary_sm: result of `compute_unary_set_mappings`\n :type unary_sm: np.array[n,dtype=int]\n :param counts: number of vertices/index\n :type counts: np.array[n,dtype=int]\n :param empty: an index that is assigned no vertex, None is also allowed\n :type empty: int/None\n :yield: iterator over target indices\n :rtype: Iterator[int]\n \"\"\"\n for k in image:\n if k == index:\n continue\n if counts[index] > 1 or counts[k] > 1:\n yield k\n elif vertex < unary_sm[k]: # implicitly: counts[index]==1 and counts[k]==1\n yield k\n if counts[index] > 2 or (counts[index] == 2 and vertex==binary_sm[index]):\n yield empty\n \n if 
random_stream is not None:\n # Random Move-Enumeration\n pweights = compute_probability_weights(indexing, counts, image, binary_sm)\n vertices = np.random.choice(indexing.shape[0], random_stream, p=pweights)\n for vertex in vertices:\n index = indexing[vertex]\n ks = list(candidates(vertex, index, image, binary_sm, unary_sm, counts, empty))\n k = random.choice(ks)\n yield vertex, k\n else:\n # Move-Enumeration\n for vertex, index in enumerate(indexing):\n for k in candidates(vertex, index, image, binary_sm, unary_sm, counts, empty):\n yield vertex, k", "def neighbours(pos):\r\n\t\tnbs = []\r\n\t\tfor direction in directions:\r\n\t\t\tnb = add(pos, direction)\r\n\t\t\tif is_inside(nb):\r\n\t\t\t\tnbs.append(nb)\r\n\t\treturn nbs", "def find_all_connectors(rows):\r\n lines = []\r\n short_lines=[]\r\n counter=0\r\n road_list=['FF']\r\n for row in rows:\r\n strings=string.split(row,'\\n')[1:]\r\n for stringI in strings:\r\n tokens=string.split(stringI,'\\t')\r\n if len(tokens)>1:\r\n if tokens[11] in road_list:\r\n description=tokens[13]\r\n clues=string.split(description, ' ')\r\n for c in clues:\r\n if c=='to':\r\n pieces=string.split(description, 'to')\r\n elif c=='TO':\r\n pieces=string.split(description, 'TO')\r\n else:\r\n continue\r\n i=string.split(pieces[0], ' ')\r\n j=string.split(pieces[1], ' ')\r\n road_i=0\r\n road_j=0\r\n dir_i=None\r\n dir_j=None\r\n dir_i, road_i=handle_special_cases(i)\r\n dir_j, road_j=handle_special_cases(j)\r\n if dir_i==None or road_i==None:\r\n for s in i:\r\n try:\r\n road_i=int(s)\r\n except ValueError:\r\n dir_i=parse_direction(s, dir_i)\r\n if dir_j==None or road_j==None:\r\n for t in j:\r\n try:\r\n road_j=int(t)\r\n except ValueError:\r\n dir_j=parse_direction(t, dir_j)\r\n if dir_i==None or dir_j==None or road_i==0 or road_j==0:\r\n if dir_i==None:\r\n print 'bad i in : ', description\r\n if dir_j==None:\r\n print 'bad j in : ', description\r\n if (road_i==0 or road_j==0):\r\n print 'unhandled road case: ', description \r\n else:\r\n \r\n# print stringI\r\n if tokens[2]==dir_j:\r\n counter=counter+1\r\n lines.append([int(tokens[0]), float(tokens[7]), tokens[2], [float(tokens[8]), float(tokens[9])], int(tokens[1])])\r\n short_lines.append([road_i, dir_i, [float(tokens[8]), float(tokens[9])], road_j, dir_j])\r\n# print 'connecting from road '+str(road_i)+' in direction '+dir_i+' to '+str(road_j)+' in direction '+dir_j+' near postmile '+tokens[7] \r\n elif tokens[2]==dir_i:\r\n counter=counter+1\r\n lines.append([int(tokens[0]), float(tokens[7]), tokens[2], [float(tokens[8]), float(tokens[9])], int(tokens[1])])\r\n short_lines.append([road_i, dir_i, [float(tokens[8]), float(tokens[9])], road_j, dir_j])\r\n# print 'connecting from road '+str(road_i)+' in direction '+dir_i+' near postmile '+tokens[7]+' to '+str(road_j)+' in direction '+dir_j \r\n else:\r\n print 'WEIRD DIRECTION: ', description\r\n continue \r\n #print 'dir_i: ', dir_i\r\n# print 'dir_j: ', dir_j\r\n\r\n print 'on august 10th, there were 110 total connectors and 98 could be parsed'\r\n print counter\r\n return lines, short_lines", "def neighbours((u,v)):\r\n return ((u,v+1), (u+1,v), (u,v-1), (u-1,v))", "def compute_neighbours(index, matrix):\n row, col = decode_to_matrix_cell(index, matrix)\n n1 = index + 1\n if n1 >= matrix.size or col == matrix.cols - 1:\n n1 = None\n\n n2 = index + matrix.cols\n if n2 >= matrix.size or row == matrix.rows - 1:\n n2 = None\n return n1, n2,", "def regex_grid(n):\n cx = 2 ** (n - 1)\n cy = 2 ** (n - 1)\n grid = [[grid_numbering(n, i , j, cx, cy) for i 
in range(2 ** n)] for j in range(2 ** n)]\n \n return grid", "def _get_neighbours(point):\n # Pull coords out of point.\n x = point[0]\n y = point[1]\n z = point[2]\n return ((x-1, y, z), (x+1, y, z), (x, y-1, z), (x, y+1, z), (x, y, z-1), (x, y, z+1))", "def _neuron_location(self, m, n):\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def make_mammalian_n_glycan_neighborhoods():\n neighborhoods = NeighborhoodCollection()\n\n _neuraminic = \"(%s)\" % ' + '.join(map(str, (\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuAc\"),\n FrozenMonosaccharideResidue.from_iupac_lite(\"NeuGc\")\n )))\n _terminal = _neuraminic + \\\n \" + max(%s - %%d, 0)\" % FrozenMonosaccharideResidue.from_iupac_lite(\"Hex\")\n _hexose = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['Hex', ])))\n _hexnac = \"(%s)\" % ' + '.join(\n map(str, map(FrozenMonosaccharideResidue.from_iupac_lite, ['HexNAc', ])))\n\n high_mannose = CompositionRangeRule(\n _hexose, 3, 12) & CompositionRangeRule(\n _hexnac, 2, 2) & CompositionRangeRule(\n _neuraminic, 0, 0)\n high_mannose.name = \"high-mannose\"\n neighborhoods.add(high_mannose)\n\n base_hexnac = 3\n base_terminal_groups = 2\n for i, spec in enumerate(['hybrid', 'bi', 'tri', 'tetra', 'penta', \"hexa\", \"hepta\"]):\n if spec == 'hybrid':\n rule = CompositionRangeRule(\n _hexnac, base_hexnac - 1, base_hexnac + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, base_terminal_groups) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + i + 3)\n rule.name = spec\n neighborhoods.add(rule)\n else:\n sialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n (_neuraminic), 1, base_terminal_groups + i\n ) & CompositionExpressionRule(\n \"(Hex > %d) & (Hex < (%d - (NeuAc + NeuGc)))\" % (base_hexnac + i - 2, base_hexnac + (2 * i) + 3))\n\n sialo.name = \"%s-antennary\" % spec\n asialo = CompositionRangeRule(\n _hexnac, base_hexnac + i - 1, base_hexnac + i + 1\n ) & CompositionRangeRule(\n _neuraminic, 0, 1 if i < 2 else 0\n ) & CompositionRangeRule(\n _hexose, base_hexnac + i - 1,\n base_hexnac + (2 * i) + 3)\n asialo.name = \"asialo-%s-antennary\" % spec\n neighborhoods.add(sialo)\n neighborhoods.add(asialo)\n return neighborhoods", "def neighbours(assignment): \n for index_1, index_2 in itertools.combinations(range(len(assignment)), 2):\n new_assign = list(assignment)\n new_assign[index_1], new_assign[index_2] = new_assign[index_2], new_assign[index_1]\n yield tuple(new_assign)", "def expand(self, network, radius):\n expansion = []\n neighbors = frozenset().union(*[set(network.getrow(g).indices) for g in self.genes]) \n for neighb in neighbors: \n if neighb in self.genes:\n continue\n preds = list(set(network.getrow(neighb).indices) & self.genes) \n if len(preds)>2:\n pass\n dist_seed = self.dist[preds].min() + 1\n if dist_seed > radius:\n continue\n \n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n next_pattern.edges |= set((pred, neighb) for pred in preds) \n next_pattern.added = neighb \n next_pattern.dist[neighb] = dist_seed\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]", "def get_neighbours(self):\n n = [deepcopy(self.representation) for i in range(len(self.representation) - 1)]\n\n for count, i in enumerate(n):\n i[count], i[count + 1] = i[count + 1], i[count]\n\n n = [Individual(i) for i in n]\n return n", "def findMatches(sequence, patterns):\n#\n#\n# idGenerator = IdGenerator()\n# root = 
Edge('', None, idGenerator)\n# i = 0\n# sequence = sequence + '$'\n# print len(sequence)\n# for i in range(len(sequence)):\n# seq = sequence[i:]\n# edge = root\n# while len(seq) > 0:\n# edge = edge.addSequence(seq, i)\n# seq = seq[1:]\n# print i\n # root = buildTrie(generateSequences(sequence))\n matches = [[m.start() for m in re.finditer('(?=' + pattern + ')', sequence)] for pattern in patterns]\n return matches", "def neighbors(self, cell):\n x = cell.x\n y = cell.y\n for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:\n neighbor = self[new_x, new_y]\n if neighbor is not None:\n yield neighbor", "def get_neighbours(self):\r\n n = [deepcopy(self.representation) for i in range(len(self.representation) - 1)]\r\n\r\n for count, i in enumerate(n):\r\n i[count], i[count + 1] = i[count + 1], i[count]\r\n\r\n n = [Individual(i) for i in n]\r\n return n", "def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()", "def lldp_neighbour(self, output_str):\n\n date_time = get_date_time()\n # connections = []\n # top = 212\n # # bottom = -33\n # trimmed_str = output_str[top:]\n # for line in trimmed_str.split('\\n'):\n # line_content = line.split()\n # local_port = line_content[1] + line_content[2]\n # remote_device = line_content[0].split('.')[0]\n # remote_port = line_content[8] + line_content[9]\n # connections.append([self.device, local_port.strip(), remote_device.strip(), remote_port.strip(),\n # date_time])\n #\n # connections_df = pd.DataFrame(connections,\n # columns=['local_device', 'local_port', 'remote_device', 'remote_port',\n # 'date_time'])\n\n connections = []\n top = 289\n bottom = -33\n trimmed_str = output_str[top:bottom]\n for line in trimmed_str.split('\\n'):\n line_content = line.split()\n local_port = line_content[1] + line_content[2]\n remote_device = line_content[0].split('.')[0]\n remote_port = line_content[9] + line_content[10]\n connections.append([self.device, local_port.strip(), remote_device.strip(), remote_port.strip(),\n date_time])\n\n connections_df = pd.DataFrame(connections,\n columns=['local_device', 'local_port', 'remote_device', 'remote_port',\n 'date_time'])\n return connections_df", "def get_2_step_neighbours(node):\n for i in range(len(node)):\n yield node[0:i] + (flip(node[i]),) + node[i+1:]\n\n for i, j in itertools.permutations(range(len(node)), 2):\n if i < j:\n yield node[0:i] + (flip(node[i]),) + node[i+1:j] + (flip(node[j]),) + node[j+1:]", "def draw_neighbor_counts(img_bgr, rafts_loc, num_of_rafts):\n points = rafts_loc\n vor = ScipyVoronoi(points)\n neighbor_counts = np.zeros(num_of_rafts, dtype=int)\n for raft_id in range(num_of_rafts):\n neighbor_counts[raft_id] = np.count_nonzero(vor.ridge_points.ravel() == raft_id)\n\n font_face = cv.FONT_ITALIC\n font_scale = 0.5\n font_color = (0, 165, 255) # BGR\n font_thickness = 1\n output_img = img_bgr\n for raft_id in np.arange(num_of_rafts):\n 
text_size, _ = cv.getTextSize(str(raft_id + 1), font_face, font_scale, font_thickness)\n output_img = cv.putText(output_img, str(neighbor_counts[raft_id]),\n (rafts_loc[raft_id, 0] + text_size[0] // 2, rafts_loc[raft_id, 1] + text_size[1]),\n font_face, font_scale, font_color, font_thickness, cv.LINE_AA)\n\n return output_img", "def buildNetwork(binary_matrix,seed_index):\n \n \n # Get starting point for network\n seed = set(np.where(binary_matrix[seed_index]==1)[0])\n cluster = set(seed)\n NEW = set(seed)\n cluster.update(np.array([seed_index]))\n NEW.update(np.array([seed_index]))\n while True:\n temp_set = set()\n for n in NEW:\n # temp_set will have all atoms, without duplicates,\n # that are connected to all atoms in NEW.\n temp_set.update(np.where(binary_matrix[n]==1)[0])\n if temp_set.issubset(cluster):\n # if temp_set has no new atoms, the search is done.\n break\n else:\n NEW = temp_set - cluster # List of newly discovered atoms\n cluster.update(temp_set) # cluster is updated with new atoms\n return(cluster)", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A", "def findNeighbours(self):\n neighbours = []\n\n for i in range(self.xCoordinate - 1, self.xCoordinate + 2):\n for j in range(self.yCoordinate - 1, self.yCoordinate + 2):\n if (not (i == self.xCoordinate and j == self.yCoordinate)) and (0 <= i <= 394 and 0 <= j <= 499):\n neighbours.append(PixelPosition(i, j))\n\n return neighbours", "def distance_between_pattern_and_strings(pattern, dna):\n\n k = len(pattern)\n distance = 0\n\n for text in dna:\n hamming_distance = 1000000\n for i in range(len(text) - k + 1):\n if hamming_distance > compute_hamming_distance(pattern, text[i:i + k]):\n hamming_distance = compute_hamming_distance(pattern, text[i:i + k])\n distance = distance + hamming_distance\n return distance", "def get_ngrams(seq, n):\n return", "def chromosome_to_cycle(chromosome):\n nodes = [0] * (len(chromosome) * 2 + 1)\n chromo = [0] + chromosome.copy()\n for j in range(1, len(chromo)):\n i = chromo[j]\n if i > 0:\n nodes[2 * j - 1] = 2 * i - 1\n nodes[2 * j] = 2 * i\n else:\n nodes[2 * j - 1] = -2 * i\n nodes[2 * j] = -2 * i - 1\n return nodes[1:]", "def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1", "def parseNeighbors(urls):\n parts = re.split(r'\\s+', urls)\n for i in range(len(parts)):\n for j in range(i,len(parts)):\n if i!=j:\n yield parts[i],parts[j]", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def _neuron_locations(self, m, n):\n #Nested iterations over both dimensions\n #to generate all 2-D locations in the map\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])", "def expandX(self, network, radius):\n expansion = []\n for gene in self.genes:\n for neighb in network.getrow(gene).indices:\n if neighb in self.genes:\n continue\n if self.dist[gene]+1 > radius:\n continue\n next_pattern = copy.deepcopy(self)\n next_pattern.genes.add(neighb)\n #next_pattern.edges.add((gene, neighb))\n next_pattern.added = neighb \n next_pattern.dist[neighb] = self.dist[gene] + 
1\n expansion += [next_pattern]\n\n return expansion if len(expansion) > 0 else [self]", "def set_pattern(self, pattern):\n for ir, row in enumerate(pattern):\n for ic, col in enumerate(row):\n relay_n = ir*len(row) + ic\n self.relay.set(relay_n, bool(col))", "def findIsland(i, j, matrix):\n visited = [[ False for ele in row] for row in matrix]\n totalEdges = traverseNodes(i, j, matrix, visited)\n\n return totalEdges", "def get_neighbours(self, business, num=5, add_self=False):\n\n def radius_step(radius, num_longtidues, num_latitudes, time):\n \"\"\"expand the search-radius exponentially\"\"\"\n step = int(exp(time))\n radius['long_down'] = radius['long_down'] - step\n if radius['long_down'] <= 0:\n radius['long_down'] = 0\n radius['long_up'] = radius['long_up'] + step\n if radius['long_up'] >= num_longtidues - 1:\n radius['long_up'] = num_longtidues - 1\n radius['lat_down'] = radius['lat_down'] - step\n if radius['lat_down'] <= 0:\n radius['lat_down'] = 0\n radius['lat_up'] = radius['lat_up'] + step\n if radius['lat_up'] >= num_latitudes - 1:\n radius['lat_up'] = num_latitudes - 1\n\n cell = self.get_cell(business)\n b_long = business.longitude\n b_lat = business.latitude\n radius = {'long_down': cell[0], 'long_up': cell[0] + 1,\n 'lat_down': cell[1], 'lat_up': cell[1] + 1}\n ret = []\n time = 0\n inner_radius = 0\n while len(ret) < num and inner_radius < 100:\n found = []\n radius_step(radius, self.longitudes.size, self.latitudes.size,\n time)\n time = time + 1\n for row in range(radius['long_down'], radius['long_up']):\n for col in range(radius['lat_down'], radius['lat_up']):\n if row in self.cells and col in self.cells[row]:\n for item in self.cells[row][col]:\n if item not in ret:\n found.append(item)\n if (len(found) + len(ret)) < num:\n continue\n # We approximate the in-radius of the search-rectangle by half of\n # the distance between the centers of left and right border\n # (Not exactly the in-radius on the surface of a sphereoid, but\n # easier to calculate)\n inner_radius = haversine((self.longitudes[radius['long_down']],\n self.latitudes[cell[1]]),\n (self.longitudes[radius['long_up']],\n self.latitudes[cell[1]])) / 2\n for neighbour in found:\n n_long = neighbour['longitude']\n n_lat = neighbour['latitude']\n dist = haversine((b_long, b_lat), (n_long, n_lat))\n # make sure we only include businesses in the in-circle of the\n # search-rectangle\n if dist <= inner_radius and \\\n (add_self or neighbour['index'] != business.name):\n neighbour['distance'] = dist\n ret.append(neighbour)\n return sorted(ret, key=itemgetter('distance'))[:num]", "def get_neighbours(self):\n return []", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def find_neighbors(self):\n #checked#\n ###your code here###\n for address in self.homes:\n for i in range(-1, 2):\n for j in range(-1,2):\n neighbor_address=(address[0]+i, address[1]+j)\n if neighbor_address in self.homes and neighbor_address!=address:\n self.homes[address].neighbors.append(self.homes[neighbor_address])", "def get_neighbours(train, test_row, num_neighbours, distance_metrics=\"block\"):\n distances = []\n for train_row in train:\n if distance_metrics == \"block\":\n distance = block_distance(test_row, train_row)\n else:\n distance = euclidean_distance(test_row, 
train_row)\n distances.append((train_row, distance))\n distances.sort(key=lambda tup: tup[1])\n neigbours = []\n for i in range(num_neighbours):\n neigbours.append(distances[i][0])\n return neigbours", "def neighbors((min_i, min_j), (max_i, max_j), (i, j)):\n if j + 1 <= max_j:\n yield (i, j + 1)\n if j - 1 >= min_j:\n yield (i, j - 1)\n if i + 1 <= max_i:\n yield (i + 1, j)\n if i - 1 >= min_i:\n yield (i - 1, j)", "def print_neighbours(self, word=''):\n\n if word in self.index.keys():\n word_ind = self.index[word]\n for i in self.graph[word_ind]:\n print(self.words[i])\n print()\n else:\n print('Error - Not a valid word')", "def test_multigrid_calculates_neighbours_correctly():\n\n # create a grid which will result in 9 cells\n h = 64\n img_dim = (3 * h + 1, 3 * h + 1)\n amg = mg.MultiGrid(img_dim, h, WS=127)\n\n # check that each cell has the expected neighbours\n print(amg.n_cells)\n\n # expected neieghbours left to right, bottom to top\n cells = [{\"north\": amg.cells[3], \"east\": amg.cells[1], \"south\": None, \"west\": None}, # bl\n {\"north\": amg.cells[4], \"east\": amg.cells[2],\n \"south\": None, \"west\": amg.cells[0]}, # bm\n {\"north\": amg.cells[5], \"east\": None,\n \"south\": None, \"west\": amg.cells[1]}, # br\n {\"north\": amg.cells[6], \"east\": amg.cells[4],\n \"south\": amg.cells[0], \"west\": None}, # ml\n {\"north\": amg.cells[7], \"east\": amg.cells[5],\n \"south\": amg.cells[1], \"west\": amg.cells[3]}, # mm\n {\"north\": amg.cells[8], \"east\": None,\n \"south\": amg.cells[2], \"west\": amg.cells[4]}, # mr\n # tl\n {\"north\": None, \"east\": amg.cells[7],\n \"south\": amg.cells[3], \"west\": None},\n # tm\n {\"north\": None,\n \"east\": amg.cells[8], \"south\": amg.cells[4], \"west\": amg.cells[6]},\n {\"north\": None, \"east\": None,\n \"south\": amg.cells[5], \"west\": amg.cells[7]}, # tr\n ]\n\n for ii, (gc, cell) in enumerate(zip(amg.cells, cells)):\n print(ii)\n assert gc.north == cell['north']\n assert gc.east == cell['east']\n assert gc.south == cell['south']\n assert gc.west == cell['west']", "def find_path_dfs(world_nparray):\n world_ndarray = np.copy(world_nparray)\n start = tuple(np.argwhere(world_ndarray == -2)[0])\n goal = tuple(np.argwhere(world_ndarray == -3)[0])\n\n world_ndarray[world_ndarray == -2] = 0\n world_ndarray[world_ndarray == -3] = 0\n\n world_tuple = tuple(map(tuple, world_ndarray))\n\n stack = deque([(\"\", start)]) # deque appends faster.\n visited = set() # Each element has to be unique in a set\n graph = get_neighbors(world_tuple)\n route_str = \"\"\n cost = 0\n\n while stack:\n path, current = stack.pop() # LIFO : Last in, first out\n if current == goal:\n route_str = path\n break\n if current in visited:\n continue\n visited.add(current)\n cost += 1\n for direction, neighbour in graph[current].iteritems():\n stack.append((path + direction, neighbour))\n world_ndarray[neighbour] = cost\n\n # print \"Expanded nodes(DFS): \", len(visited), \" Path length: \", len(route_str)\n # Convert string directions to 2D(x,y) coordinates\n route_coord = [start]\n for p in route_str:\n route_coord.append(graph[route_coord[-1]][p])\n\n world_ndarray[start] = -2\n world_ndarray[goal] = -3\n\n return route_coord, world_ndarray, len(visited), len(route_str)", "def find_match_DFS(self, array, pattern):\n def simulate(state_id, match, idx):\n if idx >= len(array):\n return Match(pattern=self.pattern) # no match\n state = self.states[state_id]\n for s in state.transitions:\n if s.symbol & array[idx]:\n if s.is_final:\n if array[idx+1:].count(BLACK) 
== 0:\n match_final = match + [s.symbol]\n match_final += [WHITE] * (len(array) - idx - 1)\n return Match(match_final, pattern=self.pattern)\n # else: its not added to the stack\n else:\n ans = simulate(s.id, match + [s.symbol], idx+1)\n if ans.is_match:\n return ans\n return Match(pattern=self.pattern) # no match\n min_length = sum(pattern) + len(pattern) -1\n self.compile(pattern) # create the state first\n\n return simulate(0, [], 0) # start recursive call", "def motif_enumeration(dnas, k, d):\n assert (all(is_dna(dna) for dna in dnas))\n patterns = set()\n for kmer in get_all_kmers(dnas[0], k):\n for kmer_neigh in get_neighbours(kmer, d):\n counts = [count_occurrences(dna, kmer_neigh, d) for dna in dnas]\n if all(counts):\n patterns.add(kmer_neigh)\n return patterns", "def get_neighbours(point, grid):\n # possible movements (diagonally is impossible)\n dy, dx = [-1, 0, 1, 0], [0, 1, 0, -1]\n\n neighbours = []\n for i in range(4):\n y, x = point[0] + dy[i], point[1] + dx[i]\n\n # skip if not within maze's bounds (NOT actually needed since there is a \"#\" barrier around the maze)\n # if not (0 <= x < len(grid) and 0 <= y < len(grid[0])):\n # continue\n\n point_type = grid[y][x]\n if point_type == \"#\": # skip if wall\n continue\n neighbours.append((y, x))\n\n return neighbours", "def ngramas(n, string):\n\n ngrams = []\n i = 0\n while i + n < len(string):\n ngrams.append(string[i:i + n])\n i += 1\n\n return ngrams", "def neighbours(ar, cur_index, cnt_of_neiboors=3, exclude_from_neibors_index=[]):\n rmax = np.max([0, cur_index + cnt_of_neiboors - len(ar)])\n lmin = np.max([cur_index - (cnt_of_neiboors + rmax), 0])\n\n excl = set(exclude_from_neibors_index) | {cur_index}\n nbs = [i for i in range(lmin, len(ar)) if i not in excl]\n return ar[nbs[:cnt_of_neiboors * 2]]", "def find_pattern_positions(pattern, DNA, mutation_thresh=0):\n positions = []\n for i in range(len(DNA) - len(pattern) + 1):\n\n current_pattern = DNA[i: (i + len(pattern))]\n if hamming_distance(pattern, current_pattern) <= mutation_thresh:\n positions.append(i)\n\n return positions", "def neighbor(self, position):\n if self.options['PeriOpt']:\n if position[0] == 0:\n yield (self.size[0] - 1, position[1]), 0\n elif position[0] == self.size[0] - 1:\n yield (0, position[1]), 1\n if position[1] == 0:\n yield (position[0], self.size[1] - 1), 2\n elif position[1] == self.size[1] - 1:\n yield (position[0], 0), 3\n\n if position[0] > 0:\n yield (position[0] - 1, position[1]), 0\n if position[0] < self.size[0] - 1:\n yield (position[0] + 1, position[1]), 1\n if position[1] > 0:\n yield (position[0], position[1] - 1), 2\n if position[1] < self.size[1] - 1:\n yield (position[0], position[1] + 1), 3", "def get_neighbours(self, cell):\n\t\tx,y = cell.find_id()\n\t\tlength = self.space.shape[1]\n\t\twidth = self.space.shape[0]\n\t\tif (length == 0 or width == 0 or x < 0 or x >= length or y < 0 or y >= width):\n\t\t\treturn []\n\t\tneighs = [(i,j) for i in range(y-1,y+2) if 0<=i<width for j in range(x-1,x+2) if 0<=j<length]\n\t\tneighbours = []\n\t\tfor neigh in neighs:\n\t\t\tneighbours.append(self.space[neigh[0],neigh[1]])\n\t\treturn neighbours", "def build_island_subtree(node, pattern, mapping):\n last_processed = 0\n content = node.text\n children = []\n\n # Intervals describes a non-overlapping splitting of the content according to the pattern.\n intervals = []\n for m in re.finditer(pattern, content):\n intervals.extend([(g, m.start(g), m.end(g)) for g in list(pattern.groupindex.keys()) if m.start(g) != m.end(g)])\n 
intervals.sort(key=lambda x: (x[1], x[2]))\n\n for interval in intervals:\n # Create simple HDDToken of the substring proceeding a subgroup.\n if last_processed < interval[1]:\n next_token_text = content[last_processed:interval[1]]\n prefix = content[0:last_processed]\n children.append(HDDToken(name='',\n text=next_token_text,\n start=Position(node.start.line + content[0:last_processed].count('\\n'),\n len(prefix) - prefix.rfind('\\n')),\n end=Position(node.start.line + next_token_text.count('\\n'),\n len(next_token_text) - next_token_text.rfind('\\n')),\n replace=next_token_text))\n\n # Process an island and save its subtree.\n children.append(build_hdd_tree(input_stream=InputStream(content[interval[1]:interval[2]]),\n grammar_name=mapping[interval[0]][0],\n start_rule=mapping[interval[0]][1]))\n last_processed = interval[2]\n\n # Create simple HDDToken of the substring following the last subgroup if any.\n if last_processed < len(content):\n next_token_text = content[last_processed:]\n prefix = content[0:last_processed]\n children.append(HDDToken(name='',\n text=next_token_text,\n start=Position(node.start.line + content[0:last_processed].count('\\n'),\n len(prefix) - prefix.rfind('\\n')),\n end=Position(node.start.line + next_token_text.count('\\n'),\n len(next_token_text) - next_token_text.rfind('\\n')),\n replace=next_token_text))\n return children", "def matched_neighbors(coord, second_char, matrix, row_length, column_length):\n row_number, column_number = coord\n neighbors_coordinates = [(row, column) for row in xrange(row_number - 1, row_number + 2)\n for column in xrange(column_number - 1, column_number + 2)\n if row_length > row >= 0 and column_length > column >= 0\n and coord_char((row, column), matrix) == second_char\n and not (row, column) == coord]\n\n return neighbors_coordinates", "def _setup_markov_network(self, probabilistic):\n for index_counter in range(self.genome.shape[0] - 1):\n # Sequence of 42 then 213 indicates a new Markov Gate", "def neighbour_cells(id, Nx):\n r = cell_coord(id, Nx)\n neighs = []\n tmp = np.arange(3) - 1\n for p in itertools.product(tmp, tmp, tmp):\n neigh = (r + p) % Nx\n neighs.append(neigh)\n return [id_from_coord(neigh, Nx) for neigh in neighs]", "def get_neighbours(pos):\n neighbours = {tuple(sum(x) for x in zip(pos, offset)) for offset in relative_positions}\n return neighbours", "def str_search_two(pattern, text):\n N, M = len(text), len(pattern)\n i, j = 0, 0 \n while i < N and j < M:\n if text[i] == pattern[j]:\n j += 1\n else:\n i -= j\n j = 0 \n i += 1\n if j == M:\n return i - M\n else:\n return N", "def neighborhood(index, npoints, maxdist=1):\n return [index + i for i in range(-maxdist, maxdist + 1)\n if i != 0 and 0 <= index + i <= npoints - 1]", "def neighbours2((u,v)):\r\n\r\n return ((u-1, v+1), (u,v+1), (u+1,v+1), \r\n (u-1,v), (u+1,v),\r\n (u-1,v-1), (u,v-1), (u+1,v-1))", "def DiscoverPatterns(parameters, graph):\n patternCount = 0\n # get initial one-edge patterns\n parentPatternList = GetInitialPatterns(graph, parameters.temporal)\n if DEBUGFLAG:\n print(\"Initial patterns (\" + str(len(parentPatternList)) + \"):\")\n for pattern in parentPatternList:\n pattern.print_pattern(' ')\n discoveredPatternList = []\n while ((patternCount < parameters.limit) and parentPatternList):\n print(str(parameters.limit - patternCount) + \" patterns left\")\n childPatternList = []\n # extend each pattern in parent list (***** todo: in parallel)\n while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if 
((len(parentPattern.instances) > 1) and (patternCount < parameters.limit)):\n patternCount += 1\n extendedPatternList = Pattern.ExtendPattern(parentPattern, parameters.temporal)\n while (extendedPatternList):\n extendedPattern = extendedPatternList.pop(0)\n if DEBUGFLAG:\n print(\"Extended Pattern:\")\n extendedPattern.print_pattern(' ')\n if (len(extendedPattern.definition.edges) <= parameters.maxSize):\n # evaluate each extension and add to child list\n extendedPattern.evaluate(graph)\n if ((not parameters.prune) or (extendedPattern.value >= parentPattern.value)):\n Pattern.PatternListInsert(extendedPattern, childPatternList, parameters.beamWidth, parameters.valueBased)\n # add parent pattern to final discovered list\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n parentPatternList = childPatternList\n # insert any remaining patterns in parent list on to discovered list\n while (parentPatternList):\n parentPattern = parentPatternList.pop(0)\n if (len(parentPattern.definition.edges) >= parameters.minSize):\n Pattern.PatternListInsert(parentPattern, discoveredPatternList, parameters.numBest, False) # valueBased = False\n return discoveredPatternList", "def get_neighbours_and_directions(self, from_position):\n \n # Transform index into board matrix into index into index into neighbour matrix\n from_row_index = self.board_to_connection_index(from_position)\n row = self.connection_matrix[from_row_index]\n \n neighbours = []\n for col_num in range(0, len(row)): \n if row[col_num]:\n # Transform index into board index\n board_index = self.connection_to_board_index(col_num)\n if self.board[board_index[0]][board_index[1]].state != PegState.EMPTY:\n neighbours.append((board_index, row[col_num])) # Store board index and direction in neighbours\n return neighbours", "def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours", "def test_get_neighbours(self):\n data = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n board = Board(data)\n\n # [pos(x, y), #neighbours]\n posx_posy_n = [[0, 0, 2], [0, 1, 3], [0, 2, 2],\n [1, 0, 3], [1, 1, 4], [1, 2, 3],\n [2, 0, 2], [2, 1, 3], [2, 2, 2]]\n for x, y, n 
in posx_posy_n:\n neighbours = [i for i in board.get_neighbours(x, y)]\n self.assertEquals(len(neighbours), n)" ]
[ "0.7232133", "0.674774", "0.6449919", "0.6243702", "0.6238935", "0.6133001", "0.612903", "0.5946089", "0.5851762", "0.5826999", "0.5814815", "0.5778444", "0.5769048", "0.5745069", "0.55670846", "0.5559852", "0.5557054", "0.5541717", "0.55216914", "0.55193025", "0.54643875", "0.5460622", "0.5423978", "0.54168546", "0.5381349", "0.53689903", "0.5359835", "0.53541565", "0.5339618", "0.5327189", "0.5320547", "0.5314955", "0.52926636", "0.52798986", "0.52650875", "0.5249887", "0.52368027", "0.51905954", "0.5185613", "0.5174721", "0.5169737", "0.5154405", "0.5150492", "0.5139507", "0.51356065", "0.5132619", "0.512866", "0.5126626", "0.51016", "0.5089032", "0.50882256", "0.50841284", "0.5065333", "0.5063199", "0.505473", "0.50522214", "0.5042274", "0.50311154", "0.5023011", "0.5017297", "0.5016788", "0.5014661", "0.5008669", "0.50032264", "0.49986687", "0.49986318", "0.49962187", "0.49962187", "0.49861294", "0.49797493", "0.49650535", "0.49646378", "0.4953805", "0.49526894", "0.49514922", "0.49511987", "0.49378285", "0.493069", "0.4928701", "0.4922016", "0.4911072", "0.49023807", "0.48996672", "0.4898839", "0.48933765", "0.4892418", "0.48899022", "0.48877847", "0.48780513", "0.4877502", "0.48721138", "0.48653814", "0.48610568", "0.48603442", "0.4858599", "0.48532462", "0.4847811", "0.48454085", "0.48434207", "0.4840074" ]
0.64392906
3
Whether the given card matches this card
def is_match(self, card): return self.suit == card.suit or self.value == card.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_card_match(self, chosen_card, active_card, active_suit):\n\t\treturn chosen_card.is_match(active_card) or chosen_card.suit == active_suit", "def __eq__(self, other_card):\n if self.rank == other_card.rank or self.suit == other_card.suit:\n return True\n else:\n return False", "def check_cards(self, cards):\n if len(cards) != 3:\n return False\n\n match = 0\n card1 = cards[0][1]\n card2 = cards[1][1]\n card3 = cards[2][1]\n\n match += self.compare_element(card1, card2, card3, 'shape')\n match += self.compare_element(card1, card2, card3, 'colour')\n match += self.compare_element(card1, card2, card3, 'count')\n match += self.compare_element(card1, card2, card3, 'fill')\n\n return match == 4", "def check_card(card1, card2):\r\n\r\n num1 = card1.split(' ')[0]\r\n num2 = card2.split(' ')[0]\r\n\r\n if num1 == num2:\r\n return True\r\n else:\r\n return False", "def __eq__(self, card2):\n return self.suit == card2.suit and self.rank == card2.rank", "def can_play(self, card):\n played_cards = map(lambda x: str(x).lower(), self.played_cards)\n if str(card).lower() in played_cards:\n return False\n if card.prebuild in played_cards:\n return True\n\n for res in card.cost", "def does_player_have_card(self, player, card):\n return card in self.hands[player]", "def __eq__(self, other: Card) -> bool:\n return compare_map[self.number] == compare_map[other.number]", "def cardPlayable(self, card):\n return self.field[Suit.toInt(card.getSuit()) - 1] == card.getValue() - 1", "def is_valid(self, card):\n # type: (str, Card) -> bool\n if card.version == \"3.0\":\n return False\n fingerprint = self.crypto.calculate_fingerprint(\n Utils.strtobytes(card.snapshot)\n )\n fingerprint_hex = fingerprint.to_hex\n if fingerprint_hex != card.id:\n return False\n verifiers = self.verifiers.copy()\n card_public_key = self.crypto.import_public_key(card.public_key)\n verifiers[fingerprint_hex] = card_public_key\n for key in verifiers:\n if key not in card.signatures:\n return False\n is_valid = self.crypto.verify(\n fingerprint.value,\n Utils.b64tobytes(card.signatures[key]),\n verifiers[key]\n )\n if not is_valid:\n return False\n return True", "def _check_suit_or_value_match(cls, card1, card2):\n\t\tsuit_match, value_match = False, False\n\t\tif (card1.suit == card2.suit) or (card2.suit == constants.CARD_BLACK) or (card1.suit == constants.CARD_BLACK):\n\t\t\tsuit_match = True\n\t\tif card1.value == card2.value:\n\t\t\tvalue_match = True\n\t\treturn suit_match or value_match", "def is_card_playable(self, card):\n color_index = COLOR.index(card[0])\n return len(self.firework[color_index]) == int(card[1]) - 1", "def __eq__(self, other):\n if isinstance(other, Card):\n return self.color == other.color and self.value == other.value\n return False", "def validate_cards(self, cards_list):\n return set(self.hand).issubset(set(cards_list))", "def cardExists(self, id):\n return id in self.cards", "def is_blackjack(self) -> bool:\n if self.score == 21 and len(self.cards) == 2:\n return True\n else:\n return False", "def check_valid(self, cards):\n\n if len(cards) == 1: # one card\n return True\n if len(cards) == 2: # two cards\n if ((self.num_to_card(int(cards[0])) == self.num_to_card(int(cards[1]))) or # two same cards\n (int(cards[0]) > 51) or # any card and a joker\n (int(cards[1])) > 51): # any card and a joker\n return True\n return False\n\n # 3 or more: all same number/ascending order\n # check how many jokers\n jokers = 0\n for card in cards:\n #print(int(card))\n #print(self.num_to_card(card))\n if int(card) > 
51:\n jokers += 1\n #print(\"YESSSSSSSSSSIR\")\n #print(f'[THERE ARE {jokers} JOKERS]')\n\n # check if all same number\n sort = sorted(cards)\n #print(f'[THE SORTED CARDS: {sort}]')\n index = 0\n for card in sort:\n if self.num_to_card(int(card)) == self.num_to_card(int(sort[0])) or int(card) > 51:\n index += 1\n if index == len(cards):\n return True\n\n # check ascend order\n if not self.is_same_sign(cards):\n print('Here')\n return False\n\n #print(\"accend left\")\n return self.ascend(cards, jokers)", "def valid(self, a_card: card.Card) -> bool:\n if self._pile:\n return self._pile[-1].foundation_valid(a_card)\n if a_card.value == 0:\n return True\n return False", "def check_card_number(self, card_number):\n database_cursor.execute(f\"SELECT number FROM card WHERE number = {card_number};\")\n result = database_cursor.fetchall()\n return result[0][0] == card_number if result else False", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def is_same_sign(self, cards):\n\n jokers = 0\n w_o_jokers = []\n for card in cards:\n if self.num_to_card(int(card)) == 0:\n jokers += 1\n else:\n w_o_jokers.append(int(card))\n\n w_o_jokers = sorted(w_o_jokers)\n print(\"whitout jokers: \", w_o_jokers)\n if w_o_jokers[0] <= 12: # if the cards are CLUBS\n if w_o_jokers[-1] > 12:\n return False\n if w_o_jokers[0] <= 25: # if the cards are DIAMONDS\n if w_o_jokers[-1] > 25:\n return False\n if w_o_jokers[0] <= 38: # HEARTS\n if w_o_jokers[-1] > 38:\n return False\n if w_o_jokers[0] <= 51:\n if w_o_jokers[-1] > 51:\n return False\n return True", "def valid(self, a_card: card.Card) -> bool:\n if self._pile:\n return self._pile[-1].tableau_valid(a_card)\n if a_card.value == 12:\n return True\n return False", "def hash_comparison(self):\n for result in self.cards:\n if result.hash_status:\n return True\n return False", "def hasBlackjack(self):\n return len(self.cards) == 2 and self.getPoints() == 21", "def match(self) -> bool:", "def deck_has_cards(deck, cards):\n deck_dict = collections.defaultdict(int)\n for card in itertools.chain(deck.draw_pile, deck.discard_pile, deck.hand):\n deck_dict[card] += 1\n return deck_dict == cards", "def has_cards(self):\n return self.hand.len() > 0", "def compare_element(self, card1, card2, card3, element):\n e1 = card1[element]\n e2 = card2[element]\n e3 = card3[element]\n if (e1 == e2 and e2 == e3) or (e1 != e2 and e1 != e3 and e2 != e3):\n # All the same or all different.\n return 1\n return 0", "def is_valid(current_card: Card, destination: Card) -> bool:\n # TODO: check for a card to a space is only Kings; maybe in the board?\n match = current_card.color == destination.color\n difference = destination.value - current_card.value\n if not match and difference == 1:\n return True\n else:\n return False", "def is_match(self, other_cpe):\n if not isinstance(other_cpe, CPE):\n return False\n\n if self.part == other_cpe.part and self.vendor == other_cpe.vendor:\n\n if other_cpe.product not in ['*', self.product]:\n return False\n if other_cpe.version not in ['*', self.version]:\n return False\n if other_cpe.update not in ['*', self.update]:\n return False\n if other_cpe.edition not in ['*', self.edition]:\n return False\n if other_cpe.language not in ['*', self.language]:\n return False\n if other_cpe.sw_edition not in ['*', self.sw_edition]:\n return False\n if other_cpe.target_sw not in ['*', self.target_sw]:\n return False\n if other_cpe.target_hw not in ['*', self.target_hw]:\n return False\n if other_cpe.other not in 
['*', self.other]:\n return False\n\n return True\n else:\n return False", "def _check_effect_match(cls, card, effect):\n\t\tif effect:\n\t\t\tfor act in card.actions:\n\t\t\t\tif act.has_effect(effect):\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def matches(self, actual) -> bool:\n return self.matcher.matches(actual)", "def is_card_in_other_hands(self, own_hand_index, card):\n for i, hand in enumerate(self.hands):\n if i == own_hand_index:\n continue\n if card in hand:\n return True\n return False", "def matches(self):\n return False", "def validCard(card):\n card = card.strip()\n #print(\"Card: \"+card)\n #print(\"Card in hex: \" + \":\".join(\"{:02x}\".format(ord(c)) for c in card))\n \n if re.fullmatch(\"[0-9]{10}\", card):\n return hashlib.sha1(card.encode(\"ascii\")).hexdigest()\n else:\n return None", "def match(self, move):\n click1 = move[0]\n click2 = move[1]\n\n if click1['row'] == click2['row'] and click1['column'] == click2['column']:\n raise Exception(\"Corrupt move\") # \"Move is the same\"\n\n # Fetch the card id from playfield for the two different squares. If the id number in the two squares are the same\n # then we have a matching set of cards.\n id1 = self.get_card_id(click1)\n id2 = self.get_card_id(click2)\n move[0].update({'card': id1})\n move[1].update({'card': id2})\n return move, id1 == id2", "def check_card_type(cards):\n if isinstance(cards, str):\n cards = str2cards(cards)\n if not isinstance(cards, list):\n return False, None\n sorted_cards = sort_cards(cards)\n value = Doudizhu.DATA.get(cards2str(sorted_cards))\n if value is None:\n return False, ValueError('invalid card type')\n\n return True, value", "def cards_are_same(hand1, hand2):\n hand1_dict = {}\n hand2_dict = {}\n\n # List -> Dict conversion\n if isinstance(hand1, dict):\n hand1_dict = hand1\n else:\n for card in hand1:\n hand1_dict[card] = hand1_dict.get(card, 0) + 1\n if isinstance(hand2, dict):\n hand2_dict = hand2\n else:\n for card in hand2:\n hand2_dict[card] = hand2_dict.get(card, 0) + 1\n\n return hand1_dict == hand2_dict", "def has_matches(self):\n for rank, rank_count in self._rank_counts.items():\n if rank_count > 1:\n return True\n return False", "def __eq__(self, other):\n if not isinstance(other, CreditCardIdentificationCreditCard):\n return False\n\n return self.__dict__ == other.__dict__", "def has_any(self, name):\n counter = 0\n for element in self.cards:\n if name in str(element):\n counter += 1\n\n if counter > 0:\n return True\n else:\n return False", "def can_afford_card(self,\n card: Card) -> bool:\n price_after_discount = card.price % self.discount()\n missing_gems = 0\n for gem_color in GemColor:\n if gem_color != GemColor.GOLD:\n missing_gems += max(price_after_discount.value(gem_color) - self.gems_possessed.value(gem_color),0)\n return self.gems_possessed.value(GemColor.GOLD) >= missing_gems", "def check_pin(self, card_number, pin):\n database_cursor.execute(f\"SELECT pin FROM card WHERE number = {card_number};\")\n result = database_cursor.fetchall()\n print(result)\n return result[0][0] == pin", "def is_valid_retrieval(self, card_index):\n return card_index == 0", "def is_valid_retrieval(self, card_index):\n return card_index == 0", "def can_be_played(cls, card, context={}):\n\t\treturn True", "def __eq__(self, other):\n return(\n self.name == other.name and\n self.hand == other.hand and\n self.score == other.score\n )", "def has_won(self):\n coders_card = self.get_coders().get_amount()\n if coders_card > 3:\n return True\n else:\n return 
False", "def is_blackjack(self):\n if self.hand == 21 and len(list(self)) ==2:\n print '%s = Blackjack'%self\n return True", "def is_valid_foundation(current_card: Card, destination: Card) -> bool:\n # TODO: check for Ace to empty foundation slot; maybe in board?\n suit_match = current_card.suit == destination.suit\n difference = destination.value - current_card.value\n\n if suit_match and (difference == -1 or difference == 12):\n return True\n else:\n return False", "def build_deck_screen_my_deck_check_duplicate(card, local_store_list):\n for cd in local_store_list:\n if card.set_number == cd.set_number and card.card_number == cd.card_number:\n return True\n break\n return False", "def __le__(self, other: Card) -> bool:\n return compare_map[self.number] <= compare_map[other.number]", "def is_response_correct(self, response):\n for answer in self.my_osid_object.get_answers():\n if self._is_match(response, answer):\n return True\n return False", "def testCard(self):\n # test1\n cardObj1 = Card('A','d')\n self.assertEquals(1,cardObj1.get_rank())\n self.assertEquals('d',cardObj1.get_suit())\n # test2\n cardObj2 = Card('J','d')\n self.assertEquals(10,cardObj2.get_rank())\n # test3\n cardObj3 = Card(5,'d')\n self.assertEquals(5,cardObj3.get_rank())", "def smart_card_detected(self):\n\n session = self.__get_pkcs11_session()\n\n return False if session is None else True", "def check_card(self, card, issued_amount=None):\n\n # MODIF: issued_amount is only used for CardIssues as we need to know how much was issued in the bundle.\n # CONVENTION: voters' weight is the balance at the start block of current epoch\n\n debug = self.debug_donations\n\n if card.type == \"CardIssue\":\n if debug: print(\"PARSER: Checking validity of CardIssue\", card.txid, \"based on txid:\", card.donation_txid)\n\n # First step: Look for a matching DonationTransaction.\n dtx_id = card.donation_txid\n\n # check 1: filter out duplicates (less expensive, so done first)\n if (card.sender, dtx_id) in self.used_issuance_tuples:\n if debug: print(\"PARSER: Ignoring CardIssue: Duplicate or already processed part of CardBundle.\")\n return False\n\n # Check if it is a proposer or a donation issuance.\n # Proposers provide the ref_txid of their proposal transaction.\n # If this TX is in proposal_txes and they are the sender of the card and fulfill all requirements,\n # then the token is granted to them at their proposal address.\n\n if (dtx_id in self.valid_proposals) and self.validate_proposer_issuance(dtx_id, issued_amount, card.sender, card.blocknum):\n if debug: print(\"PARSER: DT CardIssue (Proposer):\", card.txid)\n\n elif self.validate_donation_issuance(dtx_id, issued_amount, card.sender):\n if debug: print(\"PARSER: DT CardIssue (Donation):\", card.txid)\n\n else:\n if debug: print(\"PARSER: Ignoring CardIssue: Invalid data.\")\n return False\n\n self.used_issuance_tuples.append((card.sender, dtx_id))\n return True\n\n else:\n\n if debug: print(\"PARSER: DT CardTransfer:\", card.txid)\n return True", "def is_one_rank_apart(card1, card2):\n def card_value(card):\n return 'A23456789TJQK'.index(solvers.deck.card_rank(card))\n\n pos1, pos2 = card_value(card1), card_value(card2)\n diff = abs(pos1 - pos2)\n return diff in (1, 12)", "def match(self, target, guess):\r\n return guess == target", "def compare_cards(self, guess):\n \n \"\"\"\n Compares cards to determine higher_lower, \n compares result with guess\n Args: \n self: : An instance of Dealer.\n self.card_1: int\n self.card_2: int\n guess: bool\n \"\"\"\n card_str_1 = 
self.get_card_str(self.card_1)\n card_str_2 = self.get_card_str(self.card_2)\n if guess: \n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score += 100\n if not guess:\n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score += 100\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score -= 75", "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "def can_split(self) -> bool:\n if len(self.cards) == 2 and self.cards[0].value == self.cards[1].value:\n return True\n else:\n return False", "def matches(self, answer):\n return self.group_id == answer.group_id and \\\n self.block_id == answer.block_id and \\\n self.answer_id == answer.answer_id and \\\n self.group_instance == answer.group_instance and \\\n self.answer_instance == answer.answer_instance", "def guess(card1: dict, card2: dict) -> bool:\n print(f\"The current card is {card1['rank']} of {card1['suit']}\")\n selection = str(input('Will the next card be higher h or lower l?: '))\n if selection == 'h':\n return compare(card1, card2) < 0\n elif selection == 'l':\n return compare(card1, card2) > 0\n else:\n print(\"Type h or l\")\n return False", "def has_rank(self, rank):\n for card in self.cards:\n if card.rank == rank:\n return True\n return False", "def cardDiscardable(self, card):\n if self.cardDead(card):\n return True\n\n cardAttr = \"\"\n if Suit.toString(card.getSuit()) == \"white\":\n cardAttr = \"w\"\n elif Suit.toString(card.getSuit()) == \"blue\":\n cardAttr = \"b\"\n elif Suit.toString(card.getSuit()) == \"red\":\n cardAttr = \"r\"\n elif Suit.toString(card.getSuit()) == \"green\":\n cardAttr = \"g\"\n elif Suit.toString(card.getSuit()) == \"yellow\":\n cardAttr = \"y\"\n\n if card.getValue() == 1:\n cardAttr += \"1\"\n elif card.getValue() == 2:\n cardAttr += \"2\"\n elif card.getValue() == 3:\n cardAttr += \"3\"\n elif card.getValue() == 4:\n cardAttr += \"4\"\n elif card.getValue() == 5:\n cardAttr += \"5\"\n\n if card.getValue() == 1:\n if self.discardedDict[cardAttr] < 2:\n self.discardedDict[cardAttr] += 1\n # print(3 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 2 or card.getValue() == 3 or card.getValue() == 4:\n if self.discardedDict[cardAttr] < 1:\n self.discardedDict[cardAttr] += 1\n # print(2 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 5:\n if self.discardedDict[cardAttr] < 0:\n self.discardedDict[cardAttr] += 1\n # print(1 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n # print(\"Useful card\")\n return False", "async def check_if_ace(self, card_id, user_id):\n aces = [\"1\", \"14\", \"27\", \"40\"]\n aces_used = await self.get_aces_used(user_id)\n if card_id in aces and card_id not in aces_used:\n aces_used.append(card_id)\n await self.set_aces_used(aces_used, user_id)\n return True\n return False", "def __ne__(self, other: Card) -> bool:\n return not self.__eq__(other)", "def _resource_match(chromo, 
resource):\n return all(resource[k] == v for (k, v) in _resource_fields(chromo).items())", "def check_selected_card(_player1, _player2):\n if _player1.selected_card and _player2.selected_card:\n color = _player1.selected_card.suit\n if _player2.selected_card.suit != color and check_color_card(_player2, color):\n _player2.selected_card = None", "def is_ok_two_lines(line1, line2):\n card1 = line1[0]\n card2 = line1[1]\n card3 = line1[2]\n card4 = line2[0]\n card5 = line2[1]\n card6 = line2[2]\n idents1 = [card.ident for card in line1]\n idents2 = [card.ident for card in line2]\n intersection = list(set(idents1) & set(idents2))\n if intersection:\n return False\n if not is_coupled(card1.south, card4.north):\n return False\n if not is_coupled(card2.south, card5.north):\n return False\n if not is_coupled(card3.south, card6.north):\n return False\n return True", "def is_ok_line(line):\n card1 = line[0]\n card2 = line[1]\n card3 = line[2]\n\n if not is_coupled(card1.east, card2.west):\n return False\n if not is_coupled(card2.east, card3.west):\n return False\n return True", "def is_match(raw_string, cypher_string):\n \n if not len(raw_string) == len(cypher_string):\n return False\n \n pairs = {}\n \n for raw_char, cypher_char in zip(raw_string, cypher_string):\n \n if cypher_char in pairs and pairs[cypher_char] != raw_char:\n return False\n elif not raw_char in pairs:\n pairs[raw_char] = cypher_char\n elif raw_char in pairs and pairs[raw_char] != cypher_char:\n return False\n else:\n pass\n \n return True", "def __ge__(self, other: Card) -> bool:\n return not self.__lt__(other)", "def is_correct(self):\r\n score_dict = self.get_score()\r\n return score_dict['score'] == score_dict['total']", "def partial_match(self, other_product: Product):\n if self.product_id and other_product.product_id and self.product_id == other_product.product_id:\n return True\n if self.quote_currency and other_product.quote_currency and self.quote_currency == other_product.quote_currency:\n return True\n if self.base_currency and other_product.base_currency and self.base_currency == other_product.base_currency:\n return True\n return False", "def _is_matching(device: USBDevice, matcher: USBMatcher | USBCallbackMatcher) -> bool:\n if \"vid\" in matcher and device.vid != matcher[\"vid\"]:\n return False\n if \"pid\" in matcher and device.pid != matcher[\"pid\"]:\n return False\n if \"serial_number\" in matcher and not _fnmatch_lower(\n device.serial_number, matcher[\"serial_number\"]\n ):\n return False\n if \"manufacturer\" in matcher and not _fnmatch_lower(\n device.manufacturer, matcher[\"manufacturer\"]\n ):\n return False\n if \"description\" in matcher and not _fnmatch_lower(\n device.description, matcher[\"description\"]\n ):\n return False\n return True", "def differentiate_cards(card):\n\t\tdef High_Card(numbers,colors):\n\t\t\treturn len(set(numbers)) == 5\n\t\tdef One_Pair(numbers,colors):\n\t\t\treturn len(set(numbers)) == 4\n\t\tdef Two_Pairs(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\treturn [numbers.count(i) for i in numbers].count(2) == 4\n\t\tdef Three_of_a_Kind(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\tfor i in numbers:\n\t\t\t\tif numbers.count(i) == 3:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight(numbers,colors):\n\t\t\tfor i in xrange(1,len(numbers)):\n\t\t\t\tif numbers[i] - numbers[i-1] != 1:\n\t\t\t\t\treturn False\n\t\t\treturn True\n\t\tdef Flush(numbers,colors):\n\t\t\treturn len(set(colors)) == 1\n\t\tdef 
Full_House(numbers,colors):\n\t\t\tnumbers_set = set(numbers)\n\t\t\tif len(numbers_set) != 2:\n\t\t\t\treturn False\n\t\t\ta = numbers[0]\n\t\t\tb= [x for x in numbers if x != a][0]\n\t\t\treturn (numbers.count(a) == 2 and numbers.count(b) == 3) or\\\n\t\t\t\t(numbers.count(a) == 3 and numbers.count(b) == 2)\n\t\tdef Four_of_a_Kind(numbers,colors):\n\t\t\tfor i in set(numbers):\n\t\t\t\tif numbers.count(i) == 4:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight_Flush(numbers,colors):\n\t\t\treturn Straight(numbers,colors) and Flush(numbers,colors)\n\t\tdef Royal_Flush(numbers,colors):\n\t\t\tRoyal = [10,11,12,13,14]\n\t\t\treturn numbers == Royal and Flush(numbers,colors)\n\n\t\tcards = {'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,\n\t\t 'T':10,'t':10,'J':11,'j':11,'Q':12,'q':12,'K':13,'k':13,'A':14,'a':14}\n\t\tnumbers = [cards[i[0]] for i in card]\n\t\tnumbers.sort()\n\t\tcolors = [i[1] for i in card]\n\t\t\n\t\tif Royal_Flush(numbers,colors):return 9\n\t\telif Straight_Flush(numbers,colors):return 8\n\t\telif Four_of_a_Kind(numbers,colors):return 7\n\t\telif Full_House(numbers,colors):return 6\n\t\telif Flush(numbers,colors):return 5\n\t\telif Straight(numbers,colors):return 4\n\t\telif Three_of_a_Kind(numbers,colors):return 3\n\t\telif Two_Pairs(numbers,colors):return 2\n\t\telif One_Pair(numbers,colors):return 1\n\t\telif High_Card(numbers,colors):return 0", "def is_royal_flush(hand):\n\n # same suit\n suite = hand[0][1]\n count = {c:0 for c in cards.keys()}\n for c in hand:\n if suite != c[1]:\n return False\n count[c[0]] += 1\n # all in same suit\n for c in 'T J Q K A'.split():\n if count[c] != 1:\n return False\n return True", "def __gt__(self, other: Card) -> bool:\n return not self.__le__(other)", "def is_match(self, response):\n return response.find(' Matched') != -1", "def check_collide(self, card_):\n if len(self.cards) > 0:\n return self.cards[-1].check_collide(card_=card_)\n else:\n return card_.check_collide(pos=self.pos)", "def _match(self, key, attributes=None, context=None):\n matching_data = Sanitizer.ensure_int(self._get_matcher_input(key, attributes))\n if matching_data is None:\n return False\n return self.input_parsers[self._data_type](matching_data) == self._value", "def matches(self, smarts):\n return self.rdmol.HasSubstructMatch(MolFromSmarts(smarts))", "def check(card_number):\n if re.search(r'\\d{4} \\d{4} \\d{4} \\d{4}', card_number): \n if sum(int(c) for c in card_number.replace(\" \",\"\"))%10 == 0:\n return True", "def is_winner(self, given_letter):\n if self.check_diagonal_1(given_letter)[0] == self.size or \\\n self.check_diagonal_2(given_letter)[0] == self.size:\n return True\n for number in self.numbers:\n if self.check_row(number, given_letter)[0] == self.size:\n return True\n for letter in self.letters:\n if self.check_column(letter, given_letter)[0] == self.size:\n return True\n return False", "def check_for_blackjack(self):\n if (self.dealer.hand.value + self.dealer.face_down.value) == 21:\n if self.player.hand.blackjack:\n return self.blackjack_push()\n else:\n return self.blackjack_dealer_win()\n\n if self.player.hand.blackjack():\n return self.blackjack_player_win()\n lost_insurance_bet(self.side_bet)\n return False", "def test_suit(self):\n card = self._card\n self.assertEqual(card.suit, self._suit)", "def check(self, answer):\n return self.answer == answer", "def dealer_matching(self):\n if len([card for card in self.dealer_hand if card[1] == '8']) > 0:\n self.discard_pile = [card for card in self.dealer_hand if card[1] == 
'8'][0]\n self.dealer_hand.remove(self.discard_pile)\n dealer_suits = [card[0] for card in self.dealer_hand]\n self.new_suit = max(set(dealer_suits), key=dealer_suits.count)\n print(\"\\nNew suit is :\", self.new_suit)\n return 1\n if self.new_suit != '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.new_suit:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n self.new_suit = ''\n return 1\n else:\n return 0\n if self.new_suit == '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.discard_pile[0] or card[1] == self.discard_pile[1]:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n return 1\n else:\n return 0", "def is_valid_deck(deck):\n \n flag = True\n test_deck = []\n for i in range(1, len(deck) + 1):\n test_deck.append(i)\n for value in deck:\n if value not in test_deck:\n flag = False\n return flag", "def is_pair(hand):\n\tis_a_pair = False\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] == 2:\n\t\t\tis_a_pair = True\n\t\ti += 1 \n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_pair == True:\n\t\tif hand[j] == 2 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_pair:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def test_deck_contains_all_cards(self):\n\n # I'm using collections.Counter so that the order is ignored (as in a\n # set) but that multiples are accounted for.\n expected = collections.Counter([\n ('r', 'i'), ('r', 'i'), ('r', 'i'),\n ('r', 2), ('r', 3), ('r', 4), ('r', 5), ('r', 6), \n ('r', 7), ('r', 8), ('r', 9), ('r', 10),\n\n ('g', 'i'), ('g', 'i'), ('g', 'i'),\n ('g', 2), ('g', 3), ('g', 4), ('g', 5), ('g', 6),\n ('g', 7), ('g', 8), ('g', 9), ('g', 10),\n\n ('b', 'i'), ('b', 'i'), ('b', 'i'),\n ('b', 2), ('b', 3), ('b', 4), ('b', 5), ('b', 6),\n ('b', 7), ('b', 8), ('b', 9), ('b', 10),\n\n ('y', 'i'), ('y', 'i'), ('y', 'i'),\n ('y', 2), ('y', 3), ('y', 4), ('y', 5), ('y', 6),\n ('y', 7), ('y', 8), ('y', 9), ('y', 10),\n\n ('w', 'i'), ('w', 'i'), ('w', 'i'),\n ('w', 2), ('w', 3), ('w', 4), ('w', 5), ('w', 6),\n ('w', 7), ('w', 8), ('w', 9), ('w', 10), ])\n\n self.assertEqual(expected, collections.Counter(deck.deck_gen()))", "def do_passwords_match(self, password1, password2):\n return password1 == password2", "def is_valid_deck(deck_of_cards):\n new_deck = deck_of_cards[:]\n new_deck.sort()\n \n for i in range(len(new_deck)):\n if new_deck[i] != (i + 1):\n return False\n return True\n # Checks to see if each value from 1 to number of cards in the given deck \n # appears once.", "def player_hand_contains_suit(self, user_id, suit):\n print \"player_hand_contains_suit(self, user_id, suit) \"\n print \" Checking if player hand contains expected suit: {}\".format(self.bot.leading_suit)\n for user_object in self.bot.current_game.players:\n if user_object.id == user_id:\n card_value = None\n card_suit = None\n for card_obj in user_object.cards_in_hand:\n if len(card_obj) == 2:\n card_value = str(card_obj[0])\n card_suit = card_obj[1]\n else:\n card_value = str(card_obj)\n card_suit = None\n if \"d_\" not in card_value and \"t_\" not in card_value and \"vm_\" not in card_value:\n if card_suit == suit:\n return True\n return False", "def check_card_action(self, 
card):\n if card.value == \"7\":\n self.seven_punishment()\n elif card.value == \"8\":\n self.eight_punishment()\n elif card.value == \"9\":\n self.nine_punishment()\n elif card.value == \"B\":\n self.jack_wish()", "def is_high_card(hand):\n\tis_a_high_card = True\n\ti = 0\n\twhile i < 13:\n\t\tif hand[i] > 1:\n\t\t\tis_high_card = False\n\t\ti += 1\n\t\t\n\thigh_card = 0\n\tj = 0\n\twhile j < 13 and is_a_high_card == True:\n\t\tif hand[j] == 1 and j >= high_card:\n\t\t\thigh_card = j\n\t\tj += 1\n\tif is_a_high_card:\n\t\treturn True, high_card\n\telse:\n\t\treturn False", "def cards_matching_conditions(self, *conditions):\n\n results = Counter()\n for name, num in self.contents.items():\n if all(condition(self.cards.get(name)) for condition in\n conditions):\n results[name] = num\n\n return results", "def check_banned(deck_format, card_name):\n if card_name in consts.BANNINGS[deck_format]:\n return True\n return False", "def is_winner(self, mark):\n # row matching\n for row in self.storage:\n if row[0] == row[1] == row[2] == mark:\n return True\n \n # col matching\n for col in range(3):\n if self.storage[0][col] == self.storage[1][col] == self.storage[2][col] == mark:\n return True\n \n # diagonal matching\n if self.storage[0][0] == self.storage[1][1] == self.storage[2][2] == mark:\n return True\n \n # reverse diagonal matching\n if self.storage[0][2] == self.storage[1][1] == self.storage[2][0] == mark:\n return True\n \n return False" ]
[ "0.78050846", "0.7451856", "0.727455", "0.7232319", "0.718153", "0.7173871", "0.713753", "0.7100822", "0.67850065", "0.6778596", "0.6773867", "0.674615", "0.6730441", "0.66842115", "0.6663965", "0.6543529", "0.6535424", "0.6506496", "0.648398", "0.64583373", "0.6433088", "0.641239", "0.6393447", "0.6389675", "0.63526195", "0.6300056", "0.6273384", "0.62630904", "0.61931187", "0.61919403", "0.6150027", "0.6128519", "0.6097636", "0.60907334", "0.60443145", "0.6023683", "0.6022052", "0.6007801", "0.59823495", "0.59709835", "0.59696656", "0.59629446", "0.59555256", "0.5939438", "0.5939438", "0.59339637", "0.5874811", "0.5853063", "0.5850293", "0.5839724", "0.58335584", "0.5829262", "0.5810398", "0.579849", "0.57981575", "0.57742053", "0.57678956", "0.57627267", "0.5761948", "0.5756107", "0.57422554", "0.57369536", "0.5730628", "0.56924564", "0.56853485", "0.5676642", "0.5654714", "0.56420666", "0.5631723", "0.5625112", "0.5620904", "0.5612199", "0.5592954", "0.55796796", "0.55707026", "0.5566978", "0.55644274", "0.5561255", "0.55467045", "0.55418456", "0.5512515", "0.5512405", "0.55066895", "0.55042195", "0.55030024", "0.5496536", "0.548937", "0.5477153", "0.5456232", "0.5451607", "0.5446947", "0.54451716", "0.54441583", "0.54433113", "0.54406464", "0.54366434", "0.5425672", "0.54203576", "0.54103124", "0.54068375" ]
0.8583878
0
Ensures that chosen_card is an acceptable match, given the active_card and active_suit
def _validate_card_match(self, chosen_card, active_card, active_suit): return chosen_card.is_match(active_card) or chosen_card.suit == active_suit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_chosen_card(self, allowed_cards, chosen_card):\n if self.action is not None:\n if self.action in allowed_cards:\n logger.info(f\"Successfully chose the card: {self.action}\")\n chosen_card = self.action\n else:\n logger.error(f\"{self.action} is not a valid card! Choosing the first allowed card now.\")\n else:\n logger.debug(\"chosen card is None\")\n return chosen_card", "def is_match(self, card):\n\t\treturn self.suit == card.suit or self.value == card.value", "def check_selected_card(_player1, _player2):\n if _player1.selected_card and _player2.selected_card:\n color = _player1.selected_card.suit\n if _player2.selected_card.suit != color and check_color_card(_player2, color):\n _player2.selected_card = None", "def choose_validator(payload, chosen):\n _has_theme = has_theme(payload[\"cards\"], payload[\"theme\"])\n special_tuple = (\n SkullEnum.WHITE,\n SkullEnum.MERMAID,\n SkullEnum.PIRATE,\n SkullEnum.GREENPIRATE,\n SkullEnum.SKULLKING,\n )\n\n if not chosen.isdecimal():\n print(f\"Choose a number between 1 and {len(payload['cards'])}\")\n return False\n if not (1 <= int(chosen) <= len(payload[\"cards\"])):\n print(f\"Choose a number between 1 and {len(payload['cards'])}\")\n return False\n if (\n _has_theme\n and payload[\"cards\"][int(chosen) - 1].CARDTYPE not in special_tuple\n and payload[\"cards\"][int(chosen) - 1].CARDTYPE != payload[\"theme\"]\n ):\n print(\n f\"You have a card of the theme {payload['theme']}. You must choose that card\"\n )\n return False\n\n return True", "def test_suit(self):\n card = self._card\n self.assertEqual(card.suit, self._suit)", "def dealer_matching(self):\n if len([card for card in self.dealer_hand if card[1] == '8']) > 0:\n self.discard_pile = [card for card in self.dealer_hand if card[1] == '8'][0]\n self.dealer_hand.remove(self.discard_pile)\n dealer_suits = [card[0] for card in self.dealer_hand]\n self.new_suit = max(set(dealer_suits), key=dealer_suits.count)\n print(\"\\nNew suit is :\", self.new_suit)\n return 1\n if self.new_suit != '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.new_suit:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n self.new_suit = ''\n return 1\n else:\n return 0\n if self.new_suit == '':\n matching = []\n for card in self.dealer_hand:\n if card[0] == self.discard_pile[0] or card[1] == self.discard_pile[1]:\n matching.append(card)\n if len(matching) > 0:\n matching_values = list(map(self.card_value, matching))\n self.discard_pile = matching[matching_values.index(max(matching_values))]\n self.dealer_hand.remove(self.discard_pile)\n return 1\n else:\n return 0", "def compare_cards(self, guess):\n \n \"\"\"\n Compares cards to determine higher_lower, \n compares result with guess\n Args: \n self: : An instance of Dealer.\n self.card_1: int\n self.card_2: int\n guess: bool\n \"\"\"\n card_str_1 = self.get_card_str(self.card_1)\n card_str_2 = self.get_card_str(self.card_2)\n if guess: \n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score -= 75\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score += 100\n if not guess:\n if self.card_1 == self.card_2:\n print(f\"{card_str_2} is equal to 
{card_str_1}\")\n self.player.score -= 75\n elif self.card_1 > self.card_2:\n print(f\"{card_str_2} is lower than {card_str_1}\")\n self.player.score += 100\n elif self.card_1 < self.card_2:\n print(f\"{card_str_2} is higher than {card_str_1}\")\n self.player.score -= 75", "def test_play_card(self):\n self.plr.piles[Piles.DECK].set(\"Silver\", \"Province\", \"Moat\", \"Gold\")\n self.vic.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"discard\", \"discard\", \"putback\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertIn(\"Duchy\", self.vic.piles[Piles.DISCARD])\n self.assertIn(\"Gold\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Province\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Moat\", self.plr.piles[Piles.HAND])\n self.assertIn(\"Silver\", self.plr.piles[Piles.DECK])", "def check_cards_eligibility(self):\n for c in self.hand:\n c.check_actions(self)\n for c in self.phand:\n c.check_actions(self)\n for c in self.discard:\n c.check_actions(self)\n for c in self.active_player.phand:\n c.check_actions(self)\n for c in self.active_player.hand:\n c.check_actions(self)\n for c in self.active_player.discard:\n c.check_actions(self)\n for c in self.played_user_cards:\n c.check_actions(self)\n if ACTION_KEEP in self.actions:\n for p in self.players:\n for c in p.phand:\n c.check_actions(self)\n for c in p.hand:\n c.check_actions(self)\n for c in p.discard:\n c.check_actions(self)", "def choose_card(playable_cards):\r\n\r\n playing = playable_cards[0]\r\n print('\\n choosing \\n', playing)\r\n\r\n return playing # for now\r", "def followUpAttack(self, validCards):\n print(\"Select card from... \")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n while card not in validCards: # error checking\n print(card)\n print(\"Please select a valid card from...\")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "def guess(card1: dict, card2: dict) -> bool:\n print(f\"The current card is {card1['rank']} of {card1['suit']}\")\n selection = str(input('Will the next card be higher h or lower l?: '))\n if selection == 'h':\n return compare(card1, card2) < 0\n elif selection == 'l':\n return compare(card1, card2) > 0\n else:\n print(\"Type h or l\")\n return False", "def _check_suit_or_value_match(cls, card1, card2):\n\t\tsuit_match, value_match = False, False\n\t\tif (card1.suit == card2.suit) or (card2.suit == constants.CARD_BLACK) or (card1.suit == constants.CARD_BLACK):\n\t\t\tsuit_match = True\n\t\tif card1.value == card2.value:\n\t\t\tvalue_match = True\n\t\treturn suit_match or value_match", "def test_card_suit(mock_card):\n assert mock_card.suit == Suit.SPADE", "def choose_card(self, state=None):\n # if self.at_last_stich():\n # allowed = yield self.cards[0]\n # else:\n self.observation_received.acquire()\n self.observation = self.build_observation(state, self.cards)\n logger.debug(f\"choose_card received observation: {self.observation}\")\n self.observation_received.notify_all() # notify all threads to be sure\n self.observation_received.release()\n\n self.action_received.acquire()\n received = self.action_received.wait()\n if not received:\n logger.debug(\"Timeout occurred. 
action_received condition has not been notified.\")\n logger.debug(f\"choose_card received action: {self.action}\")\n allowed_cards = self.allowed_cards(state=state)\n chosen_card = allowed_cards[0] # set chosen_card to the first allowed card in case anything goes south\n chosen_card = self.set_chosen_card(allowed_cards, chosen_card)\n self.action_received.release()\n\n allowed = yield chosen_card\n\n if allowed:\n yield None", "def dealer_card_choice(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_USER = USERS[user.username]\n CURRENT_CONTEXT = process_card_value(query.data, CURRENT_USER)\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\nChoose Dealers Card: '\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=card_markup\n )\n\n # Tell ConversationHandler that we're in state `STRATEGY` now\n return STRATEGY", "def take_comp_turn(self, deck, pile):\n matches = [card for card in self.hand if card.is_match(pile.top_card() != 0)]\n if len(matches) > 0: # can play\n choice = random.randrange(len(matches))\n self.play_card(matches[choice-1], pile)\n if matches[choice - 1].kind == 'wild' or matches[choice - 1].kind == 'wild4':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n matches[choice - 1].color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n print(str(self.name) + \" played \" + str(matches[choice-1]))\n\n else: # comp can't play\n # check if deck is empty -- if so, reset it\n if deck.is_empty():\n deck.reset_deck(pile)\n # draw a new card from the deck\n newcard = self.draw_card(deck)\n print(\"The computer drew: \" + str(newcard))\n if newcard.is_match(pile.top_card()): # can be played\n self.play_card(newcard, pile)\n if newcard.kind == 'wild':\n chosencolor = random.choice(['red', 'yellow', 'green', 'blue'])\n newcard.color = chosencolor\n print(\"The color is now \" + str(chosencolor) + \".\")\n else: # still can't play\n print(\"Sorry, you still can't play.\")\n print(str(self.name) + \" played \" + str(newcard))\n return", "def test_play_no_gain(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"No\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.favors.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1)", "def can_play(self, card):\n played_cards = map(lambda x: str(x).lower(), self.played_cards)\n if str(card).lower() in played_cards:\n return False\n if card.prebuild in played_cards:\n return True\n\n for res in card.cost", "def deal(self):\n self.dealer.hit(self.deck)\n self.dealer.hit(self.deck)\n self.player.hit(self.deck)\n self.player.hit(self.deck)\n\n if self.player.sum_cards() == 21:\n self.round_winner = True\n self.print_hands()\n print(\"BLACKJACK! 
You win!\")", "def test_play(self):\n self.plr.piles[Piles.DECK].set(\"Province\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.test_input = [\"keep\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.coins.get(), 2)\n self.assertIn(\"Province\", self.plr.piles[Piles.DECK])\n self.assertNotIn(\"Province\", self.plr.piles[Piles.DISCARD])", "def compare_cards(board, eng_card, scot_card, eng_type, scot_type, eng_parameter, scot_parameter):\n\n\n \n year_ends_early = False\n\n \n if get_card_val(eng_card) > get_card_val(scot_card):\n who_goes_first = 'ENGLAND'\n \n elif get_card_val(eng_card) < get_card_val(scot_card):\n who_goes_first = 'SCOTLAND'\n \n elif get_card_val(eng_card) == get_card_val(scot_card):\n \n who_goes_first = 'ENGLAND'\n \n if get_card_val(eng_card) == 4 and get_card_val(scot_card) == 4:\n year_ends_early = True\n \n board.who_goes_first = who_goes_first\n\n eng_played_truce = False\n if eng_card == 'TRU':\n eng_played_truce = True\n\n scot_played_truce = False\n if scot_card == 'TRU':\n scot_played_truce = True\n\n if who_goes_first == 'ENGLAND':\n\n resolve_card(board, eng_type, scot_type, eng_card, 'ENGLAND', eng_parameter, scot_played_truce)\n resolve_card(board, eng_type, scot_type, scot_card, 'SCOTLAND', scot_parameter, eng_played_truce)\n \n elif who_goes_first == 'SCOTLAND':\n \n resolve_card(board, eng_type, scot_type, scot_card, 'SCOTLAND', scot_parameter, eng_played_truce)\n resolve_card(board, eng_type, scot_type, eng_card, 'ENGLAND', eng_parameter, scot_played_truce)\n \n return who_goes_first, year_ends_early", "def check_color_card(player, color):\n for card in player.cards:\n if card.suit == color:\n return True", "def take_turn(self):\n \n self.card_1 = self.get_card()\n self.display_card_1()\n guess = self.player.higher_lower()\n self.card_2 = self.get_card()\n self.display_card_2()\n self.compare_cards(guess)\n self.player.print_score()\n if self.player.score > 0:\n self.can_deal = self.player.keep_playing()\n print(\"\\n\")\n else:\n self.can_deal = False\n print(\"Game overThanks for playing!\")", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def test_gain(self):\n self.plr.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"Get Estate\"]\n self.plr.gain_card(\"Cursed Village\")\n self.assertNotIn(\"Curse\", self.plr.piles[Piles.DISCARD])\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Estate\"])\n self.assertIn(\"Duchy\", self.g.trashpile)", "def test_play(self):\n self.card = self.g[\"Festival\"].remove()\n self.plr.piles[Piles.HAND].set(\"Duchy\")\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.favors.set(2)\n self.plr.test_input = [\"Gain\"]\n self.plr.play_card(self.card)\n self.g.print_state()\n self.assertEqual(self.plr.favors.get(), 1)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 1 + 1)", "def choose_hand(hand, deck):\n possible = list()\n for c in combinations(hand, 4):\n possible.append([Cribbage.expected_score(list(c), deck), c])\n best = max(possible, key = lambda i : i[0])\n discard = list(set(hand) - set(best[1]))\n return best[1], discard", "def test_seven_cards_poker(self):\n self.assertEqual(best_hand(\"6C 7C 8C 9C TC 5C JS\".split()),\n ('6C', '7C', '8C', '9C', 'TC'))\n self.assertEqual(best_hand(\"TD TC 
TH 7C 7D 8C 8S\".split()),\n ('TD', 'TC', 'TH', '8C', '8S'))\n self.assertEqual(best_hand(\"JD TC TH 7C 7D 7S 7H\".split()),\n ('JD', '7C', '7D', '7S', '7H'))", "def indicate_discard_card(whose_turn,players):\n cards_to_choose_from = players[whose_turn].hand.cards\n players[whose_turn].hand.print_cards()\n chosen_to_discard = int(input('Select a card to discard. Type a number. '))\n return chosen_to_discard", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n player = random.choice(cards)\n return player", "def cardDiscardable(self, card):\n if self.cardDead(card):\n return True\n\n cardAttr = \"\"\n if Suit.toString(card.getSuit()) == \"white\":\n cardAttr = \"w\"\n elif Suit.toString(card.getSuit()) == \"blue\":\n cardAttr = \"b\"\n elif Suit.toString(card.getSuit()) == \"red\":\n cardAttr = \"r\"\n elif Suit.toString(card.getSuit()) == \"green\":\n cardAttr = \"g\"\n elif Suit.toString(card.getSuit()) == \"yellow\":\n cardAttr = \"y\"\n\n if card.getValue() == 1:\n cardAttr += \"1\"\n elif card.getValue() == 2:\n cardAttr += \"2\"\n elif card.getValue() == 3:\n cardAttr += \"3\"\n elif card.getValue() == 4:\n cardAttr += \"4\"\n elif card.getValue() == 5:\n cardAttr += \"5\"\n\n if card.getValue() == 1:\n if self.discardedDict[cardAttr] < 2:\n self.discardedDict[cardAttr] += 1\n # print(3 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 2 or card.getValue() == 3 or card.getValue() == 4:\n if self.discardedDict[cardAttr] < 1:\n self.discardedDict[cardAttr] += 1\n # print(2 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n elif card.getValue() == 5:\n if self.discardedDict[cardAttr] < 0:\n self.discardedDict[cardAttr] += 1\n # print(1 - self.discardedDict[cardAttr], \"card remaining for \", cardAttr)\n return True\n # print(\"Useful card\")\n return False", "def test_consumed_cards(self):\n game = TestGames.replay(9, [3, 1, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [2 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 0 / 2, # handmaid\n 1 / 2, # prince\n 0 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess", "def test_deal_insufficient_cards(self):\n cards = self.deck._deal(100)\n self.assertEqual(len(cards), 52)\n self.assertEqual(self.deck.count(), 0)", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n card = random.choice(cards)\n return card", "def buy_card(self):\n\n print(f\"Hand has buying power {self.hand_buying_power}...\")\n bought_card = None\n\n # by Platinium, if possible\n # otherwise (game stage agnostic) can buy a province or colony, always buy it\n if ((self.highest_buyable_money == cards.PLATINUM) and\n (self.game_stage == GameStage.early_game)):\n bought_card = cards.PLATINUM\n elif ((self.highest_buyable_victory_points == cards.PROVINCE) or\n (self.highest_buyable_victory_points == cards.COLONY)):\n bought_card = self.highest_buyable_victory_points\n else:\n # buy the highest buyable money by default\n if (self.highest_buyable_money != cards.COPPER):\n bought_card = self.highest_buyable_money\n\n # except if in the late game stage, in which case buy the highest\n # buyable victory points instead\n if ((self.game_stage == GameStage.late_game) and\n (self.highest_buyable_victory_points) and\n (self.highest_buyable_victory_points.victory_points > 0)):\n bought_card = self.highest_buyable_victory_points\n print(f\"Late Stage Game, so 
buying victory points over money\")\n\n # explain the play\n self.speak_hand()\n s = f\"for total buying power of {self.hand_buying_power}\"\n self.game.speak_str(s)\n\n # gain the card bought, if any, to the discard pile:\n if bought_card:\n s = f\"I buy {bought_card.name}\"\n self.game.speak_str(s)\n\n # gain the card to the discard pile\n self.deck.discard.append(bought_card)\n self.game.buy_card(bought_card)\n else:\n s = f\"I do not buy anything\"\n self.game.speak_str(s)\n\n # the whole hand is used up buying the card, discard the hand\n self.deck.discard_hand()", "def is_valid_foundation(current_card: Card, destination: Card) -> bool:\n # TODO: check for Ace to empty foundation slot; maybe in board?\n suit_match = current_card.suit == destination.suit\n difference = destination.value - current_card.value\n\n if suit_match and (difference == -1 or difference == 12):\n return True\n else:\n return False", "def choose_card_to_discard(self):\n random.choice(self.hand.card_list).use()", "def deal_card():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n return (random.choice(cards))", "def player_hit(self):\n self.player.hit(self.deck)\n self.print_hands()\n \n if self.player.sum_cards() > 21:\n self.round_winner = True\n self.print_hands()\n print(\"BUST! Dealer wins.\")", "def resolve_card(board, eng_type, scot_type, card, role, parameter, truce = False):\n\n if role == 'ENGLAND':\n which_side = eng_type\n elif role == 'SCOTLAND':\n which_side = scot_type\n\n\n if card == '1':\n movement_execution(board, which_side, role, int(card), truce)\n elif card == '2':\n movement_execution(board, which_side, role, int(card), truce)\n elif card == '3':\n movement_execution(board, which_side, role, int(card), truce)\n\n else:\n\n if role == 'ENGLAND' or not scottish_king.run_king(board, eng_type, scot_type):\n \n \n \n if card == 'SEA':\n \n if play_pass(which_side) == 'play':\n sea_execution(board, which_side, role)\n \n \n elif card == 'HER':\n \n if play_pass(which_side) == 'play':\n her_execution(board, which_side, role, eng_type, scot_type)\n \n \n elif card == 'VIC':\n if play_pass(which_side) == 'play':\n vic_execution(board, which_side, role, parameter)\n \n \n elif card == 'PIL':\n \n if play_pass(which_side) == 'play':\n pil_execution(board, which_side, role, parameter)\n \n \n elif card == 'TRU':\n \n if play_pass(which_side) == 'play':\n return True", "def deal_card():\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n random_card = random.choice(cards)\n return random_card", "def __eq__(self, other_card):\n if self.rank == other_card.rank or self.suit == other_card.suit:\n return True\n else:\n return False", "def is_card_playable(self, card):\n color_index = COLOR.index(card[0])\n return len(self.firework[color_index]) == int(card[1]) - 1", "def does_player_have_card(self, player, card):\n return card in self.hands[player]", "def validate_cards(self, cards_list):\n return set(self.hand).issubset(set(cards_list))", "def test_play_bane(self):\n self.victim.piles[Piles.HAND].set(\"Copper\", \"Silver\", self.g._bane)\n self.attacker.piles[Piles.HAND].set(\n \"Copper\", \"Silver\", \"Gold\", \"Duchy\", \"Province\"\n )\n self.attacker.add_card(self.card, Piles.HAND)\n self.attacker.test_input = [\"Duchy\", \"Province\", \"finish\"]\n self.attacker.play_card(self.card)\n try:\n self.assertNotIn(\"Curse\", self.victim.piles[Piles.DISCARD])\n except AssertionError: # pragma: no cover\n print(f\"Bane={self.g._bane}\")\n self.g.print_state()\n raise", "def 
user_turn(self):\r\n\r\n self.display_state() # display the current state\r\n print(\r\n '\\nTURN: You -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-')\r\n # Get the row and col number of the card you want to select\r\n x1, y1 = self.input_validation('Enter the location of the first card you pick (row, col) -> ')\r\n self.selected = [x1, y1] # a temporary holder for the first choice\r\n\r\n # Get the corresponding card ID which is also the key for the dictionary with all the cards\r\n choice1_key = self.state[x1, y1]\r\n print('The card you selected: {0}'.format(self.deck[choice1_key]))\r\n\r\n # Repeat this for your second choice\r\n x2, y2 = self.input_validation('Enter the location of the second card you pick (row, col) -> ')\r\n self.selected = [-1, -1] # reset the temporary hold\r\n\r\n choice2_key = self.state[x2, y2]\r\n print('The card you selected: {0}'.format(self.deck[choice2_key]))\r\n\r\n # Check if the two cards are a match or not\r\n if self.check_card(self.deck[choice1_key], self.deck[choice2_key]):\r\n print('MATCH')\r\n # Replace the corresponding cards in the remaining inventory and state with -1\r\n self.remaining[choice1_key] = -1\r\n self.remaining[choice2_key] = -1\r\n self.state[x1, y1] = -1\r\n self.state[x2, y2] = -1\r\n self.player_cards += 2 # the player gets 2 cards\r\n self.bin.append([x1, y1]) # move the location of the card to the already-taken bin\r\n self.bin.append([x2, y2])\r\n self.forget_memory(choice1_key) # remove from computer's memory\r\n self.forget_memory(choice2_key)\r\n self.match = 1 # player will continue to choose cards\r\n else:\r\n print('NOT a match')\r\n # Add these cards to the computer's memory\r\n self.computer_memory[choice1_key] = [x1, y1]\r\n self.computer_memory[choice2_key] = [x2, y2]\r\n self.match = 0 # computer's turn\r", "def hit(player):\n deal_random_card(player)", "def _deal_player():\n\n # we append the dealed card to the player's hand.\n player_hand.append(_deal_card(player_card_frame))\n\n # calculate and return the score of the player's hand.\n player_score = _score_hand(player_hand)\n\n # set the score to the respective label.\n player_score_label.set(player_score)\n\n # if the score surpasses 21, dealer wins.\n if player_score > 21:\n result_text.set(\"Dealer wins!\")", "def player_discard(self, inpt):\n \n if inpt.isdigit() == False:\n return 0\n if int(inpt) > len(self.player_hand):\n print(\"\\nNumber of card entered is greater than number of cards\")\n print(\"Please try again \\n\")\n return 0\n if self.player_hand[int(inpt)-1][1] == '8':\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n while self.new_suit not in ['h','d','s','c']:\n self.new_suit = input(\"Please enter new suit: h, d, s, c\\n\")\n print(\"\\nNew suit is: \", self.new_suit)\n return 1\n if self.new_suit != '':\n if self.player_hand[int(inpt)-1][0] == self.new_suit:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n return 1\n else:\n print(\"\\nYou need to match new suit\")\n print(\"Please try again\\n\")\n return 0\n if self.new_suit == '':\n if self.player_hand[int(inpt)-1][0] == self.discard_pile[0] or \\\n self.player_hand[int(inpt)-1][1] == self.discard_pile[1]:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n return 1\n else:\n print(\"\\nYou need to match discard pile card suit or rank\")\n print(\"Please try again\\n\")\n return 0", "def differentiate_cards(card):\n\t\tdef High_Card(numbers,colors):\n\t\t\treturn 
len(set(numbers)) == 5\n\t\tdef One_Pair(numbers,colors):\n\t\t\treturn len(set(numbers)) == 4\n\t\tdef Two_Pairs(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\treturn [numbers.count(i) for i in numbers].count(2) == 4\n\t\tdef Three_of_a_Kind(numbers,colors):\n\t\t\tif len(set(numbers)) != 3:\n\t\t\t\treturn False\n\t\t\tfor i in numbers:\n\t\t\t\tif numbers.count(i) == 3:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight(numbers,colors):\n\t\t\tfor i in xrange(1,len(numbers)):\n\t\t\t\tif numbers[i] - numbers[i-1] != 1:\n\t\t\t\t\treturn False\n\t\t\treturn True\n\t\tdef Flush(numbers,colors):\n\t\t\treturn len(set(colors)) == 1\n\t\tdef Full_House(numbers,colors):\n\t\t\tnumbers_set = set(numbers)\n\t\t\tif len(numbers_set) != 2:\n\t\t\t\treturn False\n\t\t\ta = numbers[0]\n\t\t\tb= [x for x in numbers if x != a][0]\n\t\t\treturn (numbers.count(a) == 2 and numbers.count(b) == 3) or\\\n\t\t\t\t(numbers.count(a) == 3 and numbers.count(b) == 2)\n\t\tdef Four_of_a_Kind(numbers,colors):\n\t\t\tfor i in set(numbers):\n\t\t\t\tif numbers.count(i) == 4:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\tdef Straight_Flush(numbers,colors):\n\t\t\treturn Straight(numbers,colors) and Flush(numbers,colors)\n\t\tdef Royal_Flush(numbers,colors):\n\t\t\tRoyal = [10,11,12,13,14]\n\t\t\treturn numbers == Royal and Flush(numbers,colors)\n\n\t\tcards = {'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,\n\t\t 'T':10,'t':10,'J':11,'j':11,'Q':12,'q':12,'K':13,'k':13,'A':14,'a':14}\n\t\tnumbers = [cards[i[0]] for i in card]\n\t\tnumbers.sort()\n\t\tcolors = [i[1] for i in card]\n\t\t\n\t\tif Royal_Flush(numbers,colors):return 9\n\t\telif Straight_Flush(numbers,colors):return 8\n\t\telif Four_of_a_Kind(numbers,colors):return 7\n\t\telif Full_House(numbers,colors):return 6\n\t\telif Flush(numbers,colors):return 5\n\t\telif Straight(numbers,colors):return 4\n\t\telif Three_of_a_Kind(numbers,colors):return 3\n\t\telif Two_Pairs(numbers,colors):return 2\n\t\telif One_Pair(numbers,colors):return 1\n\t\telif High_Card(numbers,colors):return 0", "def waitForCard(attempts=0):\n if attempts == 0:\n for i in range(5):\n print()\n\n print(\"💳\") # unicode card emoji\n card = getpass(prompt=\"Wave card now:\")\n card = card.strip()\n\n # On Linux, there is an issue where the first card read works\n # fine, then the second card read causes getpass() to only return\n # part of the card. Here, we wait until the correct number of\n # characters are present in the input. Note that if someone types\n # something on the keyboard and then uses the card, it won't work.\n # The next time the card is used, it should work.\n while len(card) < 10:\n card = card + getpass(prompt=\"\")\n card = card.strip()\n\n # Check if card is valid, get a hash of card so we don't have to\n # worry about saving the card data directly.\n hashed = validCard(card)\n\n # Drop the raw data of the card, we don't need it and don't want\n # it anymore.\n del card\n\n\n if hashed == None:\n print(\"🚫\") # prohibited emoji\n print(\"ERROR: Invalid card. 
Try again.\")\n soundError()\n return waitForCard(attempts+1)\n\n return hashed", "def determineWinner(self):\n if self.game_state.numActive() == 1:\n for player in self.game_state.player_list:\n if self.game_state.active_dict[player.name]:\n print \"\"\n print player.name + \" wins with\"\n for card in self.player_hand_dict[player.name]:\n print card\n print \"and takes \" + str(self.game_state.pot) + \" chips!\"\n self.game_state.player_chips[player.name] += self.game_state.pot\n return\n\n for player in self.game_state.player_list:\n for card in self.game_state.board:\n self.player_hand_dict[player.name].append(Card(card.suit, card.rank))\n hand_ranking = HandRanking(self.game_state.player_list, self.player_hand_dict)\n hand_ranking.rankHands()\n winning_rank = -1\n winner = None\n tie_list = []\n \"\"\" Get winning rank, only consider active players for the pot \"\"\"\n for player in self.game_state.player_list:\n if self.game_state.active_dict[player.name] == True:\n if DEBUG:\n print \"Considering \" + str(player.name) + \"'s hand for the pot.\"\n if hand_ranking.player_ranks_dict[player.name] > winning_rank:\n winning_rank = hand_ranking.player_ranks_dict[player.name]\n winner = player \n tie_list = []\n tie_list.append(player)\n elif hand_ranking.player_ranks_dict[player.name] == winning_rank:\n tie_list.append(player)\n \"\"\" winner should never be equal to None \"\"\"\n\n \"\"\" Check for tie and resolve if needed \"\"\"\n if len(tie_list) > 1:\n if DEBUG:\n print \"found potential tie...\"\n for player in tie_list:\n print player.name + \"'s hand:\"\n for card in hand_ranking.player_best_hand_dict[player.name]:\n print card\n print \"resolving tie...\"\n result_tie_list = self.resolveTie(hand_ranking, tie_list)\n print \"\"\n self.printPlayersHands()\n for player in result_tie_list:\n print player.name + \",\",\n print \" wins with\",\n hand_ranking.printRanking(winning_rank)\n print \"and takes \" + str(self.game_state.pot / len(tie_list)) + \" chips!\"\n for player in result_tie_list:\n self.game_state.player_chips[player.name] += self.game_state.pot / len(tie_list)\n else:\n print \"\"\n self.printPlayersHands()\n print winner.name + \" wins with\",\n hand_ranking.printRanking(winning_rank)\n print \"and takes \" + str(self.game_state.pot) + \" chips!\"\n self.game_state.player_chips[winner.name] += self.game_state.pot", "def play_card(game_id, player_id, card_id, selected_color=None):\n game_data = load_state(game_id)\n if not game_data:\n return False\n players = game_data.get('players')\n if player_id not in [p['id'] for p in players]:\n return False\n player = [p for p in players if p['id'] == player_id][0]\n if not player['active']:\n return False\n if card_id not in [c['id'] for c in player['hand']]:\n return False\n if not game_data['active']:\n return False\n card = [c for c in player['hand'] if c['id'] == card_id][0]\n msg = make_danger_message('You can\\'t play that card!')\n if not can_play_card(game_data, card):\n flash_player(game_data, player, msg)\n return False\n if card['value'] == 'WILD_DRAW_FOUR':\n if player_has_matching_color_card(game_data, player):\n flash_player(game_data, player, msg)\n return False\n if card['value'] in SPECIAL_CARDS:\n if selected_color not in CARD_COLORS:\n flash_player(game_data, player, msg)\n return False\n card['color'] = selected_color\n player['hand'].remove(card)\n if len(player['hand']) == 1:\n msg = make_info_message('Only one card to go!')\n alt_msg = make_warning_message(\n '{} only has one card 
left!'.format(player['name']))\n flash_player(game_data, player, msg, alt_msg)\n game_data['stack'].append(card)\n if card['value'] == 'REVERSE':\n game_data['reverse'] = not game_data['reverse']\n if len(game_data['players']) != 2:\n if game_data['reverse']:\n msg = make_info_message('Game order has been reversed')\n else:\n msg = make_info_message('Game order is back to normal')\n flash_broadcast(game_data, msg)\n if not player['hand']:\n set_round_winner(game_data, player)\n else:\n activate_next_player(game_data)\n save_state(game_data)\n return True", "def determine_winner(self):\n if self.player.sum_cards() > 21:\n print(\"BUST! Dealer wins.\")\n\n elif self.dealer.sum_cards() > 21:\n print(\"DEALER BUSTS! You win\")\n\n elif self.player.sum_cards() > self.dealer.sum_cards():\n print(\"You win!\")\n\n elif self.dealer.sum_cards() > self.player.sum_cards():\n print(\"Dealer wins!\")\n\n else:\n print(\"It's a tie!\")", "def card(bot, update):\n query = update.callback_query\n user = query.from_user\n chat_id = query.message.chat_id\n selected_card = query.data\n\n if (chats[chat_id].player1.card_played == []) and (chats[chat_id].player2.card_played == []):\n bot.send_message(text=Strings.CARD_SELECTED.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN,\n isgroup=True)\n if chats[chat_id].player1.user == user:\n chats[chat_id].player1.card_played = chats[chat_id].player1.hand[int(selected_card)]\n chats[chat_id].player1.hand.remove(chats[chat_id].player1.hand[int(selected_card)])\n\n elif chats[chat_id].player2.user == user:\n chats[chat_id].player2.card_played = chats[chat_id].player2.hand[int(selected_card)]\n chats[chat_id].player2.hand.remove(chats[chat_id].player2.hand[int(selected_card)])\n return CARD\n\n else:\n if chats[chat_id].player1.user == user and chats[chat_id].player1.card_played != []:\n bot.send_message(text=Strings.CARD_SELECTED2.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return CARD\n elif chats[chat_id].player2.user == user and chats[chat_id].player2.card_played != []:\n bot.send_message(text=Strings.CARD_SELECTED2.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return CARD\n else:\n if chats[chat_id].player1.user == user:\n chats[chat_id].player1.card_played = chats[chat_id].player1.hand[int(selected_card)]\n chats[chat_id].player1.hand.remove(chats[chat_id].player1.hand[int(selected_card)])\n\n elif chats[chat_id].player2.user == user:\n chats[chat_id].player2.card_played = chats[chat_id].player2.hand[int(selected_card)]\n chats[chat_id].player2.hand.remove(chats[chat_id].player2.hand[int(selected_card)])\n\n bot.edit_message_text(text=Strings.CARD_SELECTED.format(user.first_name),\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n parse_mode=ParseMode.MARKDOWN)\n bot.send_message(chat_id,\n Strings.SELECTION_COMPLETED,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n\n reply_markup = ReplyKeyboardMarkup(c_b_keyboard, selective=False)\n bot.send_message(chat_id,\n Strings.QUESTION,\n reply_markup=reply_markup,\n parse_mode=ParseMode.MARKDOWN, isgroup=True)\n return BET_CHECK", "def check_valid(self, cards):\n\n if len(cards) == 1: # one card\n return True\n if len(cards) == 2: # two cards\n if ((self.num_to_card(int(cards[0])) == self.num_to_card(int(cards[1]))) or 
# two same cards\n (int(cards[0]) > 51) or # any card and a joker\n (int(cards[1])) > 51): # any card and a joker\n return True\n return False\n\n # 3 or more: all same number/ascending order\n # check how many jokers\n jokers = 0\n for card in cards:\n #print(int(card))\n #print(self.num_to_card(card))\n if int(card) > 51:\n jokers += 1\n #print(\"YESSSSSSSSSSIR\")\n #print(f'[THERE ARE {jokers} JOKERS]')\n\n # check if all same number\n sort = sorted(cards)\n #print(f'[THE SORTED CARDS: {sort}]')\n index = 0\n for card in sort:\n if self.num_to_card(int(card)) == self.num_to_card(int(sort[0])) or int(card) > 51:\n index += 1\n if index == len(cards):\n return True\n\n # check ascend order\n if not self.is_same_sign(cards):\n print('Here')\n return False\n\n #print(\"accend left\")\n return self.ascend(cards, jokers)", "def adding_card():\n another_card = input(\"Type 'y' to get another card, type 'n' to pass: \").lower()\n if another_card =='y':\n #Adds another card to the user and calculates its score\n user_cards.append(random.choice(cards))\n ace_degrade(user_cards,calculate_score(user_cards))\n calculate_score(user_cards)\n user_score = calculate_score(user_cards)\n #score > 21, ends the game\n if user_score > 21:\n display_during_game(\"end\")\n print(\"You went over. You lose\")\n another_card ='n' \n #score <= 21 continue the game \n else:\n display_during_game(\"game\")\n adding_card()", "def rocksPaperScissors(player1, player2, advanced = False ):\n \n valid_plays = None\n if advanced: valid_plays = [\"Rock\", \"Paper\", \"Scissors\", \"Spock\", \"Lizard\"]\n else: valid_plays = [\"Rock\", \"Paper\", \"Scissors\"]\n\n if player1 not in valid_plays or player2 not in valid_plays:\n print (\"One or both players did not provide a valid_hand\")\n return\n \n if player1 == player2: print (\"Tie\")\n else:\n d = list(map(lambda x: x[1], defeats[player1]))\n if player2 in d:\n verb = defeats[player1][d.index(player2)][0]\n print( player1 + \" \" + verb + \" \" + player2)\n print(\"Player 1 wins\")\n else:\n d = list(map(lambda x: x[1], defeats[player2]))\n verb = defeats[player2][d.index(player1)][0]\n print (player2 + \" \" + verb + \" \" + player1 )\n print (\"Player 2 wins\")", "def compare_cards(card1, card2, deck, assigned_card_value):\n if card1 not in deck: \n raise ValueError(\"The card doesn't exist\")\n if card2 not in deck: \n raise ValueError(\"The card doesn't exist\")\n card1 = assigned_card_value.get(card1)\n card2 = assigned_card_value.get(card2)\n if card1 < card2:\n return 1\n elif card1 > card2:\n return 0\n elif card1 == card2:\n return -1", "def test_play_nobane(self):\n self.victim.piles[Piles.HAND].set(\"Copper\", \"Silver\")\n self.attacker.piles[Piles.HAND].set(\n \"Copper\", \"Silver\", \"Gold\", \"Duchy\", \"Province\"\n )\n self.attacker.add_card(self.card, Piles.HAND)\n self.attacker.test_input = [\"Duchy\", \"Province\", \"finish\"]\n self.attacker.play_card(self.card)\n try:\n self.assertIn(self.g[self.g._bane].cost, (2, 3))\n self.assertEqual(self.attacker.piles[Piles.HAND].size(), 5 + 2 - 2)\n self.assertIn(\"Curse\", self.victim.piles[Piles.DISCARD])\n except AssertionError: # pragma: no cover\n print(f\"Bane={self.g._bane}\")\n self.g.print_state()\n raise", "def test_play_card(self):\n while True:\n card = self.g[\"Clashes\"].remove()\n if card.name == \"Battle Plan\":\n break\n self.plr.piles[Piles.DECK].set(\"Gold\")\n self.plr.piles[Piles.HAND].set(\"Estate\", \"Militia\")\n self.plr.add_card(card, Piles.HAND)\n self.plr.test_input = [\"Reveal 
Militia\", \"Rotate Clashes\"]\n self.plr.play_card(card)\n self.assertIn(\"Gold\", self.plr.piles[Piles.HAND])\n next_card = self.g[\"Clashes\"].remove()\n self.assertEqual(next_card.name, \"Archer\")", "def testCard(self):\n # test1\n cardObj1 = Card('A','d')\n self.assertEquals(1,cardObj1.get_rank())\n self.assertEquals('d',cardObj1.get_suit())\n # test2\n cardObj2 = Card('J','d')\n self.assertEquals(10,cardObj2.get_rank())\n # test3\n cardObj3 = Card(5,'d')\n self.assertEquals(5,cardObj3.get_rank())", "def check_winner(self):\n if self.player1.chips <= BIG_BLIND_BET:\n return 2\n elif self.player2.chips <= BIG_BLIND_BET:\n return 1\n else:\n return 0", "def test_discard_action(self):\n self.plr.test_input = [\"discard silver\", \"finish selecting\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 2)\n self.assertEqual(self.plr.buys.get(), 1)\n self.assertNotIn(\"Silver\", self.plr.piles[Piles.HAND])", "def attack(self): # need to check defenders handcount\n \"\"\"Always returns a list of values\"\"\"\n if self.AI:\n # return rand.randint(0,len(self.currentHand))\n Error(\"AI not yet implemented for Attacking\")\n else:\n print(\"Select card from... \")\n cardManager.printHand(self.currentHand)\n card = int(input(\"to your attack: \"))\n while card not in self.currentHand: # error checking\n print(\"Please select a valid card from...\", end = \" \")\n cardManager.printHand(self.currentHand)\n card = int(input())\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "def is_blackjack(self) -> bool:\n if self.score == 21 and len(self.cards) == 2:\n return True\n else:\n return False", "def player_card_one_choice(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_CONTEXT = USERS[user.username]\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\nChoose Your 1st Card: '\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=card_markup\n )\n\n return PLAYER_CARD_TWO", "def can_be_played(cls, card, context={}):\n\t\treturn True", "def check_for_blackjack(self):\n if (self.dealer.hand.value + self.dealer.face_down.value) == 21:\n if self.player.hand.blackjack:\n return self.blackjack_push()\n else:\n return self.blackjack_dealer_win()\n\n if self.player.hand.blackjack():\n return self.blackjack_player_win()\n lost_insurance_bet(self.side_bet)\n return False", "def can_afford_card(self,\n card: Card) -> bool:\n price_after_discount = card.price % self.discount()\n missing_gems = 0\n for gem_color in GemColor:\n if gem_color != GemColor.GOLD:\n missing_gems += max(price_after_discount.value(gem_color) - self.gems_possessed.value(gem_color),0)\n return self.gems_possessed.value(GemColor.GOLD) >= missing_gems", "def attempt(chal, request):\n team = Teams.query.filter_by(id=session['id']).first()\n if locked(chal):\n return False, 'Challenge Locked. 
You need at least {} points.'.format(chal.unlock_at)\n \n provided_key = request.form['key'].strip()\n chal_keys = Keys.query.filter_by(chal=chal.id).all()\n for chal_key in chal_keys:\n if get_key_class(chal_key.type).compare(chal_key.flag, provided_key):\n return True, 'Correct'\n return False, 'Incorrect'", "def compare_cards(p1_name, p2_name, card1, card2, ranks=['2','3','4','5','6','7','8','9','10','J','Q','K','A']):\n\n rank1, rank2 = card1[:-1], card2[:-1]\n\n if rank1 not in ranks: raise ValueError(\"Card 1 does not have a valid card value!\")\n if rank2 not in ranks: raise ValueError(\"Card 2 does not have a valid card value!\")\n\n print(p1_name+\"\\'s\", card1, \"vs.\", p2_name+\"\\'s\", card2)\n\n winner = -1\n\n if (rank1 == rank2): winner = 0\n elif (rank1 == '2' and rank2 == 'A'): winner = 1\n elif (rank1 == 'A' and rank2 == '2'): winner = 2\n else: winner = 1 if (ranks.index(rank1) > ranks.index(rank2)) else 2\n\n if (winner == 0): print(\"There Was a Tie Between\", card1, \"and\", card2)\n elif (winner == 1): print(p1_name, \"Wins This Round With a\", card1, \"Against a\", card2)\n elif (winner == 2): print(p2_name, \"Wins This Round With a\", card2, \"Against a\", card1)\n\n return winner", "def play_card(self, rnd: PlayerRound) -> int:\n # we can check if we are playing the correct game\n assert rnd.jass_type == JASS_HEARTS\n\n # get the valid cards to play\n valid_cards = rnd.get_valid_cards()\n\n # lets divide our cards into heart and other cards\n my_heart_cards = valid_cards * color_masks[HEARTS, :]\n my_other_cards = valid_cards - my_heart_cards\n\n if rnd.nr_cards_in_trick == 0:\n # we are the first player, so we can select what to play\n # lets select some random non-heart card if we have any (not that this is necessarily\n # a good strategy :-)\n if my_other_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_other_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n else:\n # if we have to give a card, lets try to give a heart card\n if my_heart_cards.sum() > 0:\n card = np.random.choice(np.flatnonzero(my_heart_cards))\n else:\n # just play a random valid card\n card = np.random.choice(np.flatnonzero(valid_cards))\n\n self._logger.debug('Played card: {}'.format(card_strings[card]))\n return card", "def score_card_response(card, response):\n\n # If not a choice card, raise Exception(\"No method implemented.\")\n\n for opt in card['options']:\n if response == opt['value']:\n if opt['correct']:\n return 1.0, opt['feedback']\n else:\n return 0.0, opt['feedback']\n\n return 0.0, 'Default error ajflsdvco'", "def __eq__(self, card2):\n return self.suit == card2.suit and self.rank == card2.rank", "def test_for_non_blackjack(self):\n hand = self._hand\n cards = [BjCard('clubs', '8'), BjCard('diamonds', '8')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.is_blackjack, False)", "def player_hand_contains_suit(self, user_id, suit):\n print \"player_hand_contains_suit(self, user_id, suit) \"\n print \" Checking if player hand contains expected suit: {}\".format(self.bot.leading_suit)\n for user_object in self.bot.current_game.players:\n if user_object.id == user_id:\n card_value = None\n card_suit = None\n for card_obj in user_object.cards_in_hand:\n if len(card_obj) == 2:\n card_value = str(card_obj[0])\n card_suit = card_obj[1]\n else:\n card_value = str(card_obj)\n card_suit = None\n if \"d_\" not in card_value and \"t_\" not in card_value and \"vm_\" not in card_value:\n if card_suit == 
suit:\n return True\n return False", "def hit(self, card):\n self.append(card)\n values=[]\n values.append(card.value())\n if values[0] < 2:\n values.append(values[0]+ 10)\n new_sums =set([v+s for v in values for s in self.possible_sums if v+s <=21])\n new_sums =sorted(new_sums)\n if len(new_sums) ==0:\n self.hand=-1\n else:\n self.hand = new_sums[-1]\n self.possible_sums = new_sums", "def test_strategy(self):\n\n axelrod.seed(8)\n opponent = axelrod.Cooperator()\n player = axelrod.WorseAndWorse()\n match = axelrod.Match((opponent, player), turns=10)\n self.assertEqual(match.play(), [('C', 'C'),\n ('C', 'C'),\n ('C', 'C'),\n ('C', 'C'),\n ('C', 'C'),\n ('C', 'C'),\n ('C', 'D'),\n ('C', 'C'),\n ('C', 'C'),\n ('C', 'C')])\n\n # Test that behaviour does not depend on opponent\n opponent = axelrod.Defector()\n player = axelrod.WorseAndWorse()\n axelrod.seed(8)\n match = axelrod.Match((opponent, player), turns=10)\n self.assertEqual(match.play(), [('D', 'C'),\n ('D', 'C'),\n ('D', 'C'),\n ('D', 'C'),\n ('D', 'C'),\n ('D', 'C'),\n ('D', 'D'),\n ('D', 'C'),\n ('D', 'C'),\n ('D', 'C')])", "def checkDoubles(self,card): # need to check defenders handcount...\n multipleCards = [card]\n for i in range(4): # checking all other possible cards of same rank\n card_plus = card + 13 * i # checking higher values\n card_minus = card - 13 * i # checking lower values\n if card_plus in self.currentHand and card_plus < 51 and card_plus != card and card_plus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt= input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_plus])\n prompt = input(\"to your attack? (y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_plus)\n self.currentHand.remove(card_plus)\n else:\n print(\"Did not add\")\n if card_minus in self.currentHand and card_minus > 0 and card_plus != card and card_minus not in multipleCards:\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? (y/n):\")\n while prompt != 'y' and prompt != 'n': # input checking\n print(\"Do you wish to add:\")\n cardManager.printHand([card_minus])\n prompt = input(\"to your attack? (y/n):\")\n if prompt == 'y':\n print(\"added\")\n multipleCards.append(card_minus)\n self.currentHand.remove(card_minus)\n else:\n print(\"Did not add\")\n return multipleCards", "def cardPlayable(self, card):\n return self.field[Suit.toInt(card.getSuit()) - 1] == card.getValue() - 1", "def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card", "def check_card_action(self, card):\n if card.value == \"7\":\n self.seven_punishment()\n elif card.value == \"8\":\n self.eight_punishment()\n elif card.value == \"9\":\n self.nine_punishment()\n elif card.value == \"B\":\n self.jack_wish()", "def set_cards_in_hand(self, player_id: int, cards: List[TableAnswerCard]):\n with self.eng.session_mgr() as session:\n # Determine if space for a new card (any picked / nuked cards?)\n all_cards = session.query(TablePlayerHand).filter(and_(\n TablePlayerHand.player_key == player_id,\n )).all()\n total_card_cnt = len(all_cards)\n available_slots = session.query(TablePlayerHand).filter(and_(\n TablePlayerHand.player_key == player_id,\n or_(\n TablePlayerHand.is_picked,\n TablePlayerHand.is_nuked\n )\n )).all()\n self.log.debug(f'{len(available_slots)} open slots found for user out of {total_card_cnt}. 
'\n f'{len(cards)} to try to add.')\n if len(available_slots) >= len(cards):\n # Replace the first slot with a card\n self.log.debug('Existing slot(s) were equal to or greater than dealt cards.')\n for i, card in enumerate(cards):\n slot: TablePlayerHand\n slot = available_slots[i]\n self.log.debug(f'Replacing card at slot {slot.card_pos}.')\n slot.is_nuked = slot.is_picked = False\n slot.answer_card_key = card.answer_card_id\n session.add(slot)\n elif len(available_slots) == 0 and total_card_cnt + len(cards) <= 5:\n self.log.debug('No slots available, but total cards plus cards to add were at or less than '\n 'the limit. Creating new cards.')\n taken_positions = [x.card_pos for x in all_cards]\n available_positions = [i for i in range(5) if i not in taken_positions]\n # Possibly dealing with totally new game\n for i, card in enumerate(cards):\n self.log.debug(f'Adding card to new slot {available_positions[i]}...')\n session.add(TablePlayerHand(\n card_pos=available_positions[i],\n player_key=player_id,\n answer_card_key=card.answer_card_id\n ))", "def score_on_hands(cards_on_hand):\r\n score = 0\r\n straightCount = 0\r\n max_card = 0\r\n suite_dict = {}\r\n face_dict = {}\r\n transfer_dict = {'A':1,'J':11,'Q':12,'K':13}\r\n card_face = []\r\n '''Circulate the player's hand, build a list of points and a suit dict'''\r\n for index in range(len(cards_on_hand)):\r\n if str(cards_on_hand[index])[1] in transfer_dict:\r\n card_face.append(transfer_dict.get(str(cards_on_hand[index])[1]))\r\n elif str(cards_on_hand[index])[1] == '1':\r\n card_face.append(10)\r\n else:\r\n card_face.append(int(str(cards_on_hand[index])[1]))\r\n suite_dict[str(cards_on_hand[index])[0]] = 1\r\n '''Because 1 can be treated as 1 or 14, so if 1 exists, add 14 to the end of the list to calculate flush'''\r\n if 1 in card_face:\r\n card_face.append(14)\r\n\r\n '''Check straight, if it is straight, straight should be 4'''\r\n for face in range(len(card_face)-1):\r\n if card_face[face] +1 == card_face[face+1] :\r\n straightCount +=1\r\n\r\n '''Detect the number of cards of the same number'''\r\n for face in card_face:\r\n\r\n if face not in face_dict:\r\n face_dict[face] = 1\r\n else:\r\n face_dict[face] += 1\r\n\r\n '''Store the maximum number of points'''\r\n max_card = card_face[len(card_face)-1]\r\n\r\n '''Calculate player score'''\r\n if straightCount == 4:\r\n score+= 8\r\n\r\n if len(suite_dict) == 1:\r\n score+= 9\r\n\r\n for values in face_dict.values():\r\n if values == 2:\r\n score += 3\r\n elif values == 3:\r\n score += 7\r\n elif values == 4:\r\n score += 11\r\n\r\n return (score, max_card)", "def step(self, action):\n assert self.completed_rounds < self.num_rounds\n\n player = self.players[self.current_player_id]\n card = action\n\n if card not in player.hand:\n raise ValueError(\"Action not allowed because the card is not in the player's hand\")\n\n player.hand.remove(card)\n player.played.add(card)\n # print(f\"Player {self.current_player_id} with hand {[c.id for c in player.hand]} played the card {card.id}\")\n best_combination_on_the_table = self._get_best_combination(card)\n if best_combination_on_the_table:\n self.last_player_capturing_id = self.current_player_id\n player.captured.add(card)\n for c in best_combination_on_the_table:\n self.table.remove(c)\n player.captured.add(c)\n if not self.table and not (self._is_last_round and self._is_round_over()):\n player.scope += 1\n else:\n self.table.add(card)\n # print(f\"Cards on the table after play: {[c.id for c in self.table]}\")\n\n if 
self._is_round_over():\n self.completed_rounds += 1\n # print(f\"=========== Round {self.current_round} completed ============\")\n self.current_player_id = (self.current_player_id + 1) % self.num_players\n\n if self.is_over():\n last_player_capturing = self.players[self.last_player_capturing_id]\n # print(f\"Giving the remaining cards to player {last_player_capturing.player_id}\")\n for card in self.table:\n last_player_capturing.captured.add(card)\n self.table = set()\n assert all([len(p.played) == 10 for p in self.players])\n assert all([len(p.hand) == 0 for p in self.players])\n return self.get_state(), self.current_player_id", "def hasBlackjack(self):\n return len(self.cards) == 2 and self.getPoints() == 21", "def play_card(self, player, card):\n top_card = self.discard_pile[len(self.discard_pile) - 1]\n if player in self.players and not self.wait_for_card_wish:\n if not player.has_card(card):\n return False\n\n if player != self.current_player:\n return False\n\n if self.card_wished is not None:\n return self._play_wished_card(card)\n\n if top_card.value == \"7\" and self.current_draw_punishment > 1:\n return self._play_card_on_seven(card, top_card)\n\n return self._play_normal_card(card, top_card)\n return False", "def blackjack():\n start_game = input('Would you like to play blackjack? Type \"y\" or \"n\": ').lower()\n if start_game == 'y':\n deal_cards()\n elif start_game == 'n':\n print('Maybe next time!')\n exit()\n else:\n print('Invalid selection. Please try again')\n blackjack()", "def test_strategy(self):\n axelrod.seed(1)\n opponent = axelrod.Cooperator()\n player = axelrod.KnowledgeableWorseAndWorse()\n match = axelrod.Match((opponent, player), turns=5)\n self.assertEqual(match.play(), [('C', 'C'),\n ('C', 'D'),\n ('C', 'D'),\n ('C', 'D'),\n ('C', 'D')])\n\n # Test that behaviour does not depend on opponent\n opponent = axelrod.Defector()\n player = axelrod.KnowledgeableWorseAndWorse()\n axelrod.seed(1)\n match = axelrod.Match((opponent, player), turns=5)\n self.assertEqual(match.play(), [('D', 'C'),\n ('D', 'D'),\n ('D', 'D'),\n ('D', 'D'),\n ('D', 'D')])\n\n # Test that behaviour changes when does not know length.\n axelrod.seed(1)\n match = axelrod.Match((opponent, player), turns=5,\n match_attributes={'length': float('inf')})\n self.assertEqual(match.play(), [('D', 'C'),\n ('D', 'C'),\n ('D', 'C'),\n ('D', 'C'),\n ('D', 'C')])", "def test_deal_sufficient_cards(self):\n cards = self.deck._deal(10)\n self.assertEqual(len(cards), 10)\n self.assertEqual(self.deck.count(), 42)", "def player_card_two_choice(update, context):\n query = update.callback_query\n if query.message.reply_to_message:\n user = query.message.reply_to_message.from_user\n else:\n user = query.message.chat\n bot = context.bot\n CURRENT_USER = USERS[user.username]\n CURRENT_CONTEXT = process_card_value(query.data, CURRENT_USER)\n message = f'Round: {CURRENT_CONTEXT[\"round\"]} ({CURRENT_CONTEXT[\"username\"]}) \\nDealers Card: {CURRENT_CONTEXT[\"dealer_card\"]}\\nYour Cards: {CURRENT_CONTEXT[\"player_cards\"]} \\nYour total: {CURRENT_CONTEXT[\"player_total\"]} \\n\\nChoose Your 2nd Card: '\n bot.edit_message_text(\n chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text=message,\n reply_markup=card_markup\n )\n\n return DEALER_CARD", "def Deal():\r\n cardsout = []\r\n cardoptions = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\r\n topcardoptions = [0,2,3,4,5,6]\r\n topcard = topcardoptions[random.randint(0,5)]\r\n cardoptions.pop(cardoptions.index(topcard))\r\n 
cardsout.append(topcard)\r\n\r\n if SHOWHAPPENINGS == True:\r\n disp = card_dict[topcard]\r\n print(\"Topcard is: {}\".format(disp)) \r\n\r\n for i in range(4):\r\n numcards = 0\r\n while numcards < 5:\r\n possiblerange = len(cardoptions) - 1\r\n cardindex = random.randint(0,possiblerange)\r\n card = cardoptions[cardindex]\r\n cardsout.append(card)\r\n cardoptions.pop(cardoptions.index(card))\r\n PlayerHands[i].append(card)\r\n numcards += 1\r\n PlayerHands[i] = sorted(PlayerHands[i]) #putting into ascending order\r\n if i == 0 or i == 2:\r\n PlayerHands[i].append(\"RedTeam\")\r\n else: \r\n PlayerHands[i].append(\"BlackTeam\")\r\n \r\n PlayerHands[0].append(PLAYER1)\r\n PlayerHands[1].append(PLAYER2)\r\n PlayerHands[2].append(PLAYER3)\r\n PlayerHands[3].append(PLAYER4)\r\n #PlayerHand format = [card1,card2,card3,card4,card5,Team,Name]\r\n\r\n return topcard", "def play_for_dealer(self):\n while self.dealer.sum_cards() < 17:\n self.dealer.hit(self.deck)\n else:\n self.round_winner = True\n self.print_hands()\n self.determine_winner()", "def validate_card():\r\n print(\"Please insert your card\")\r\n card = int(input(\"Please enter 1 if you entered your card\"))\r\n return card", "def has_won(self):\n coders_card = self.get_coders().get_amount()\n if coders_card > 3:\n return True\n else:\n return False", "def is_valid(current_card: Card, destination: Card) -> bool:\n # TODO: check for a card to a space is only Kings; maybe in the board?\n match = current_card.color == destination.color\n difference = destination.value - current_card.value\n if not match and difference == 1:\n return True\n else:\n return False", "def test_deck_contains_all_cards(self):\n\n # I'm using collections.Counter so that the order is ignored (as in a\n # set) but that multiples are accounted for.\n expected = collections.Counter([\n ('r', 'i'), ('r', 'i'), ('r', 'i'),\n ('r', 2), ('r', 3), ('r', 4), ('r', 5), ('r', 6), \n ('r', 7), ('r', 8), ('r', 9), ('r', 10),\n\n ('g', 'i'), ('g', 'i'), ('g', 'i'),\n ('g', 2), ('g', 3), ('g', 4), ('g', 5), ('g', 6),\n ('g', 7), ('g', 8), ('g', 9), ('g', 10),\n\n ('b', 'i'), ('b', 'i'), ('b', 'i'),\n ('b', 2), ('b', 3), ('b', 4), ('b', 5), ('b', 6),\n ('b', 7), ('b', 8), ('b', 9), ('b', 10),\n\n ('y', 'i'), ('y', 'i'), ('y', 'i'),\n ('y', 2), ('y', 3), ('y', 4), ('y', 5), ('y', 6),\n ('y', 7), ('y', 8), ('y', 9), ('y', 10),\n\n ('w', 'i'), ('w', 'i'), ('w', 'i'),\n ('w', 2), ('w', 3), ('w', 4), ('w', 5), ('w', 6),\n ('w', 7), ('w', 8), ('w', 9), ('w', 10), ])\n\n self.assertEqual(expected, collections.Counter(deck.deck_gen()))" ]
[ "0.70289963", "0.70051605", "0.6518905", "0.6450109", "0.63630843", "0.630566", "0.62759304", "0.62171423", "0.61978257", "0.61198056", "0.60976046", "0.60950667", "0.6085195", "0.6076674", "0.60443", "0.60375905", "0.60289156", "0.59881556", "0.59762114", "0.59721756", "0.5937578", "0.5936938", "0.59308237", "0.59230936", "0.5919256", "0.5891327", "0.5885813", "0.5869765", "0.5838972", "0.5826585", "0.58208126", "0.58187556", "0.5811613", "0.5798809", "0.57924414", "0.57899654", "0.5789487", "0.5786951", "0.57851076", "0.57490516", "0.5747439", "0.5734018", "0.5713652", "0.57130736", "0.5707124", "0.57041574", "0.56912166", "0.56881744", "0.56865174", "0.5685391", "0.56779003", "0.5667383", "0.56519014", "0.56485057", "0.5648174", "0.5647551", "0.5643066", "0.5640654", "0.5636938", "0.5634847", "0.5626089", "0.5613006", "0.5607926", "0.5590724", "0.5590525", "0.5586366", "0.55820733", "0.5579995", "0.5579697", "0.55603683", "0.5557818", "0.55568475", "0.55448186", "0.553393", "0.5532736", "0.5529835", "0.5528047", "0.552574", "0.55186677", "0.5516898", "0.5514075", "0.5513057", "0.5509926", "0.55078447", "0.55067366", "0.5505701", "0.54979706", "0.54975677", "0.5494724", "0.5493335", "0.5491596", "0.5489584", "0.54870075", "0.5486181", "0.54827803", "0.5482065", "0.5478987", "0.5477538", "0.54750144", "0.54727393" ]
0.82993805
0
If test_mode is True, an image of `screen` is saved
def save_screen(screen): if not video_mode: # Don't record video return False # Make global variables writeable global current_frame global path_checked frames_directory = os.path.dirname( os.path.dirname( os.path.realpath(__file__))) + "\\frames\\" if not path_checked: check_folder(frames_directory) pygame.image.save( screen, frames_directory + "ants-frame{}.jpeg".format( str(current_frame).zfill(4))) current_frame += 1 # Move count to next frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveScreenPNG(self, filename):\n return nes_lib.saveScreenPNG(self.obj, filename)", "def screen_shot(self):\n screen_size = '{}x{}@{}x{}/0'.format(self.screen[0], self.screen[1], self.screen[0], self.screen[1])\n subprocess.check_call([\n ADB_EXECUTOR, '-s', self.device_id, 'shell',\n 'LD_LIBRARY_PATH=/data/local/tmp', '/data/local/tmp/minicap', '-s', '-P', screen_size,\n '>', TEMP_PIC_ANDROID_PATH\n ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n logger.info('screen shot saved in {}'.format(TEMP_PIC_ANDROID_PATH))", "def screen_shot():\n screen_shot_string_io = StringIO.StringIO()\n ImageGrab.grab().save(screen_shot_string_io, \"PNG\")\n screen_shot_string_io.seek(0)\n return screen_shot_string_io.read()", "def save_screenshot(self, file_name, width=3840, height=2160, first=True, last=True):\n if first and self.assigned_opengl_context is not None:\n self.assigned_opengl_context.makeCurrent()\n gr3.export(file_name, width, height)\n if last and self.assigned_opengl_context is not None:\n self.assigned_opengl_context.doneCurrent()", "def screens_maker(task):\n rend_type = int( task['render_type'] )\n rend_project = task['project_name']\n rend_result_dir = task['result_dir']\n file_name = p_rend_type[rend_type]['file_screen'].split( '/' )[1]\n logging.info( 'IN SCREEN Maker {}'.format( task ) )\n try:\n bpy.ops.wm.open_mainfile( filepath=rend_project )\n scn = bpy.context.scene\n scn.frame_start = 100\n scn.frame_end = 101\n bpy.data.scenes[scn.name].render.image_settings.file_format = 'JPEG'\n scn.render.filepath = '{}'.format( str( rend_result_dir ) + '/' + str( file_name ) )\n bpy.ops.render.render( write_still=True )\n try:\n os.chown( scn.render.filepath, int( u_ugid ), int( u_gguid ) )\n os.chmod( scn.render.filepath, 0o777 )\n except Exception as e:\n logging.info( 'err SCREEN MAKER rights{}'.format( str( e ) ) )\n except Exception as e:\n logging.info( 'ERR IN SCREEN Maker {}'.format( str( e ) ) )\n\n return 1", "def snapshot(self, file_path=None):\n \"\"\"default not write into file.\"\"\"\n screen = self.minicap.get_frame()\n\n if file_path:\n file_name = str(time.time()*1000) + '.jpg'\n file_path = os.path.join(file_path, file_name)\n ImgUtils.imwrite(file_path, screen)\n\n # '''t_img 需转换为cv2可解码的文件,不然会抛错 src is not a numpy array, neither a scalar'''\n # try:\n # screen = ImgUtils.str2img(screen)\n # except Exception:\n # # may be black/locked screen or other reason print exc for debugging\n # import traceback\n # traceback.print_exc()\n # return None\n\n return screen", "def screenGrab():\n box = (x_pad+1, y_pad+1, 796, 825)\n save_directory = os.getcwd()\n time_stamp = int(time.time())\n image_file_name = '{}\\\\full_snap__{}.png'.format(save_directory, time_stamp)\n im = ImageGrab.grab(box)\n im.save(image_file_name, 'PNG')", "def screen_shot(self, pic_path):\n self.run_command(f'shell screencap -p /sdcard/screen.png')\n if not path.exists(pic_path):\n self.run_command(f'pull /sdcard/screen.png {pic_path}')\n else:\n raise ADBError(f'{pic_path} already exist')\n self.run_command(f'shell rm /sdcard/screen.png')\n yield pic_path\n remove(pic_path)", "def get_screen_image(dir=\"screenshots\"):\n screenshot_name = dir + \"/screenshot_\" + str(random.randint(0, 1e10)) + \".png\"\n\n screenshot = autopy.bitmap.capture_screen()\n screenshot.save(screenshot_name)\n return screenshot_name", "def capture(self):\n current_time=time.strftime('%Y%m%d-%H%M%S')\n self.filepath=f\"files/{current_time}.png\"\n self.ids.camera.export_to_png(self.filepath)\n 
self.manager.current='image_screen'\n self.manager.current_screen.ids.img.source=self.filepath", "def test_save_screenshot():\n\n surface_flow_file = Path(TEST_RESULTS_FILES_PATH, \"surface_flow.vtu\")\n screenshot_file = save_screenshot(surface_flow_file, \"Mach\")\n assert screenshot_file.exists()\n\n if screenshot_file.exists():\n screenshot_file.unlink()", "def saveimage(self):\n if self.saveimageButton.isChecked():\n self.save = True\n self.channelsOpen()\n self.movetoStart()\n self.saveimageButton.setText('Abort')\n self.guarda = np.zeros((self.numberofPixels, self.numberofPixels))\n self.liveviewStart()\n\n else:\n self.save = False\n print(\"Abort\")\n self.saveimageButton.setText('reintentar Scan and Stop')\n self.liveviewStop()", "def export_screen(self, target_path):\n subprocess.check_call([\n ADB_EXECUTOR, '-s', self.device_id,\n 'pull', TEMP_PIC_ANDROID_PATH, target_path\n ], stdout=subprocess.DEVNULL)\n logger.info('export screen shot to {}'.format(target_path))", "def saveWindowState(self):\n print(\"Save button has been pressed!\")\n screenshot = self.widgetHolder.grab()\n self.screenshotNum += 1\n if(self.addressBox.text() != \"\"):\n screenshot.save(os.path.join(self.addressBox.text(), (\"screenshot\" + str(self.screenshotNum) + \".jpg\")))\n else:\n screenshot.save(\"screenshot\" + str(self.screenshotNum) + \".jpg\", \"jpg\")", "def screenShot(self, cam=None, path=os.path.expanduser('~'), basenm='view'):\n if cam is None:\n # This allows use to dynamicly select cameras\n cam = GetActiveCamera()\n os.chdir(path)\n self.view(cam=cam)\n WriteImage(\"%s.png\" % (basenm))", "def export_screenshot(self):\n\n if self.vis_type is None or len(self.vis_type) < 1:\n vis_type_suffix = ''\n else:\n vis_type_suffix = self.vis_type\n\n print(\"exporting screenshot for {}\".format(self.current_unit_id))\n ss_out_file = self.screenshot_dir / \"{}_{}_{}.{}\".format(\n self.current_unit_id, vis_type_suffix,\n cfg.screenshot_suffix, cfg.screenshot_format_ext)\n self.fig.savefig(ss_out_file, bbox_inches='tight', dpi=cfg.dpi_export_fig)", "def save_detection(self, image):\n\t\timg = self.visualize_detection(image)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\t\tcv2.imwrite(f'{SAVE_PATH}{self.clip}{self.num_save}.jpg', img)", "def _save_buffer(self):\n img_data = renderer.fbuffer.read(mode='color', alpha=False)\n img = Image.fromarray(img_data)\n img.save(self._save_fname)\n self._save_flag = False", "def save_screenshot(self, img, file_name: str):\n img.save(str(self.info.screenshots_path / file_name))", "def setScreenMode(mode='normal'):\n screendict = {'normal':'REVERS', 'black':'NOREV'}\n dislin.scrmod(screendict[mode])", "def capture(self):\n current_time = time.strftime('%Y%m%d-%H%M%S')\n filepath = f'files/{current_time}.png'\n self.ids.camera.export_to_png(filepath)\n self.manager.current = 'image_screen' # switch to the next screen\n self.manager.current_screen.ids.img.source = filepath # inherit img to the next screen\n return filepath", "def write_to_screen(self, text):\n\t\tself.blank_image = np.full((1280,1920, 3), 255, np.uint8)\n\t\tcv2.putText(self.blank_image, text,(40,300), font, 8,(0,0,0),3,cv2.LINE_AA)\n\t\tcv2.imshow(\"Background\", self.blank_image)\n\t\tcv2.waitKey(1)", "def draw(canvas_result,automatic_save,manual_save):\r\n if canvas_result is not None and canvas_result.image_data is not None and (automatic_save or manual_save):\r\n # Receive the user's drawing with the dimensions: 512X512X4\r\n img_data = canvas_result.image_data\r\n # the user's drawing is in 
RGBA mode with floats instead of integers - convert to uint8 type and to RGB format\r\n im = Image.fromarray(img_data.astype(np.uint8)[:,:,:3]).convert('RGB') # convert to dimensions 512X512X3\r\n # initialize a copy of the user's drawing.\r\n add_bg = np.array(im, dtype='uint8') # initalize a copy\r\n # allow the user to know that the saving is in progress.\r\n with st.spinner(\"Saving image...\"):\r\n # the drawing is lack of the GauGAN background because streamlit_drawable_canvas library doesn't allow it yet.\r\n # Because of that the background will be added manually - o(n^3) at the moment.\r\n for i in range(add_bg.shape[0]):\r\n for j in range(add_bg.shape[1]):\r\n if list(add_bg[i,j]) != [0,0,0]: # if the current RGB value is not (0,0,0) (black) -\r\n for k in range(add_bg.shape[2]): # then make sure we don't have white values (255)\r\n if add_bg[i,j][k] == 255: # we will fill them with the relevant background color position\r\n add_bg[i,j][k] = colors['Sky'][k] if i<300 else colors['Sea'][k]\r\n else: # else, we do indeed have RGB value of (0,0,0), then replace it by its entirety to the relevant\r\n # background color.\r\n add_bg[i,j] = colors['Sky'] if i<300 else colors['Sea']\r\n\r\n # Create PIL object of the manually added background with drawing on the canvas\r\n add_bg = Image.fromarray(add_bg)\r\n # Assign the path where the file will be saved\r\n if not os.path.exists(\"tmp/\"):\r\n os.makedirs(\"tmp/\")\r\n file_path = f\"tmp/pic%s.png\"%(len(counter))\r\n # Increase the counter by adding dummy element into the counter list\r\n counter.append(0)\r\n # Save the drawing in PNG format\r\n\r\n add_bg.save(file_path, \"PNG\")\r\n st.success(\"Image saved successfully. Keep drawing!!\")", "def capture_image(self):\n ext = self.image_save_type.lower()\n\n if self.calibrating:\n print('calibrating')\n\n if ext == 'fits':\n self.save_fits()\n self._image_counter += 1\n else:\n img = self.original_image\n path = os.path.join(self.home, 'data')\n name = \"camtrak_frame_{}.png\".format(self._image_counter) \n fn = os.path.join(path, name)\n cv2.imwrite(fn, img)\n\n QtWidgets.QApplication.beep()\n self.statusBar().showMessage(f'Saved image to {fn}')\n self._image_counter += 1", "def test(self,windowsize = False):\n\n # set up a specific window to test the text in\n if windowsize:\n self.screen = pygame.display.set_mode(windowsize)\n self.screen.fill((200,200,200))\n self.screen.blit(*self.blitinfo)\n\n # if no specific window is specified create a small one around the\n # outside of the text\n else:\n self.screen = pygame.display.set_mode((self.imagewidth + 20,self.imageheight + 20))\n self.screen.fill((200,200,200))\n self.screen.blit(self.image, (10,10))\n\n pygame.display.flip()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()", "def test(self,windowsize = False):\n\n # set up a specific window to test the text in\n if windowsize:\n self.screen = pygame.display.set_mode(windowsize)\n self.screen.fill((200,200,200))\n self.screen.blit(*self.blitinfo)\n\n # if no specific window is specified create a small one around the\n # outside of the text\n else:\n self.screen = pygame.display.set_mode((self.imagewidth + 20,self.imageheight + 20))\n self.screen.fill((200,200,200))\n self.screen.blit(self.image, (10,10))\n\n pygame.display.flip()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()", "def draw_screen(self, master_screen):\n master_screen.blit(self.screen_image, 
(0, 0))", "def save_full_canvas_as_png(self, output_fname):\n\n # put a sleep in here in case there is a dialog covering the screen\n # before this method is called.\n time.sleep(0.1)\n # TODO: are we missing a PIL.Image conversion here?\n im = self.save_currently_displayed_canvas_to_numpy_array()\n im.save(output_fname)", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()", "def save_image(image, filename, mode='PNG'):\n out = Image.new(mode='L', size=(image['width'], image['height']))\n out.putdata(image['pixels'])\n if isinstance(filename, str):\n out.save(filename)\n else:\n out.save(filename, mode)\n out.close()", "def saveImage(self, file_name='./out.jpg'):\n frame = self.camera.get_frame()\n color = frame.color_image[0]\n cv2.imwrite(file_name, color)\n cv2.imshow('frame', color)\n cv2.waitKey()\n cv2.destroyAllWindows()", "def save_image(driver, outname=\"states.jpeg\", folder=None):\n\n # set the output folder\n if not folder:\n folder = os.path.dirname(os.path.abspath(__file__))\n\n # get thescreenshot\n binary = driver.get_screenshot_as_png()\n stream = io.BytesIO(binary)\n\n # save screenshot in jpeg format\n img = Image.open(stream)\n img = img.convert(mode=\"RGB\")\n img.save(folder + '/' + outname, format='JPEG', dpi=(200,200))", "def create_screen(self, width, height):", "def screen_capture(self, filename):\n self.command(f'CAPTURE SCREEN as {filename}')\n return None", "def get_image():\n\n # Access the global variable and activate the saving for the last camera's\n # frame\n global _save_image\n _save_image = True", "def save(self, filename):\n try:\n import PIL\n except ImportError:\n raise RuntimeError('Could not import PIL. 
PIL (pillow) is required to save fresnel images.')\n else:\n if self._output is None:\n self.render()\n image = PIL.Image.fromarray(self._output[:], mode='RGBA')\n image.save(filename)", "def take_screenshot(x, y, num=''):\n # screenshot takes starting x,y coordinates and then for how far the shot should stretch\n pic = pyautogui.screenshot(region=(0, y * 1.3, x * 0.75, y * 0.6))\n pic.save(\"Screenshot\" + str(num) + \".png\")", "def save_image(filename):\n subprocess(\"camera_save_image(%r)\" % filename)\n ##image = acquire_image()\n ##image.save(filename)", "def save(self):\n\n self.image.save(\"./output/\" + self.name + \" pg\" + str(self._page) + \".png\")", "def screenshot(self, name=None, scale=None):\n\n if not self.is_rendered:\n self.render(interactive=False)\n\n timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n name = name or f\"brainrender_screenshot_{timestamp}\"\n if \".png\" not in name:\n name += \".png\"\n\n scale = scale or settings.SCREENSHOT_SCALE\n\n print(f\"\\nSaving new screenshot at {name}\\n\")\n\n savepath = str(self.screenshots_folder / name)\n logger.debug(f\"Saving scene at {savepath}\")\n self.plotter.screenshot(filename=savepath, scale=scale)\n return savepath", "def on_exists(fname):\n # type: (str) -> None\n #fname=\"../images/sct-160x160_460x635.PNG\"\n if os.path.isfile(fname):\n newfile = fname + '.old'\n print('{0} -> {1}'.format(fname, newfile))\n os.rename(fname, newfile)\n ############################\n #full screen \n ##########################\n\n \n with mss.mss() as sct:\n # The screen part to capture\n monitor = {'top': 0, 'left': 0, 'width': 1600, 'height': 800}\n output = './images/sct-{top}x{left}_{width}x{height}.png'.format(**monitor)\n # Grab the data\n sct_img = sct.grab(monitor)\n # Save to the picture file\n mss.tools.to_png(sct_img.rgb, sct_img.size, output)\n print(output)", "def screenshot(filename):\n call([\"screencapture\", \"Screenshot for\" + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()) + filename +\".jpg\"])", "def setup_mode_saver(self):\n saver_icon = tk.PhotoImage(file = self.saver_icon)\n self.saver_button = tk.Button(\n self.toolbar,\n width = 24,\n height = 24,\n image = saver_icon,\n command = self.save_mode)\n self.saver_button.image = saver_icon\n self.saver_button.grid(row = 0, column = 1, sticky = tk.W)", "def save_image_action(self):\n self.view.save_image(self.settings.get_image_type())", "def write_stitched_image(self):\r\n\r\n self.write_debug(\"End of train detected. Writing stitched image.\")\r\n cv2.imwrite(os.path.join(self.output_dir_stitched, 'stitched.jpg'), self.stitched_image)", "def draw(self, screen):\n\n if self.exist:\n screen.blit(self._img, self._rect)", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def configure_screenshots(scenario):\r\n world.auto_capture_screenshots = False", "def test_save_fail():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'foo.bar', 'data': [img]}\n\n images.save(parameters)", "def save_file(camera, frame):\n save = input(\"Would you like to save your drawing? Enter yes or no \")\n if save == \"yes\" or save == \"y\" or save == \"ye\" or save == \"yes \": # accounting for typos\n name = input(\"What would you like to name your masterpiece? 
\")\n filename = 'images/' + name + '.png'\n cv2.imwrite(filename, cv2.flip(frame,1)) # saves the image as the last frame\n camera.release()\n pygame.quit()\n\n # reopen saved picture to display for user\n img = cv2.imread(filename, 1)\n b,g,r = cv2.split(img) # get b,g,r\n rgb_img = cv2.merge([r,g,b]) # convert from bgr colorspace to rgb\n crop_img = rgb_img[36:450, 0:600] # crop out the colorbar\n cv2.imshow(filename, crop_img)\n cv2.imwrite(filename, crop_img)\n cv2.waitKey(10000)\n cv2.destroyAllWindows()\n camera.release()\n pygame.quit() # cleanup the camera and close any open windows\n else:\n print(\"Thank you for trying CVPaint!\")\n pygame.quit()\n camera.release()\n cv2.destroyAllWindows()", "def draw_failure(lang: str, mod: str) -> None:\r\n dirname = os.path.join(SCREENSHOT_DIR, mod)\r\n filename = dirname + r\"\\{}.png\".format(lang.split('/')[0])\r\n os.makedirs(dirname, exist_ok=True)\r\n imgdata = DRIVER.get_screenshot_as_png()\r\n with open(filename, mode='wb') as fil:\r\n fil.write(imgdata)", "def test_save_png():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.png', 'data': [img]}\n\n assert images.save(parameters)", "def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)\n return 'saved'", "def capture_screen(file_name, param=\"-m\", auto_fix = False):\r\n dir_path = os.path.dirname(file_name)\r\n if dir_path and not os.path.exists(dir_path):\r\n os.makedirs(dir_path)\r\n call = subprocess.Popen(\"scrot {} {}\".format(param, file_name), stderr=subprocess.PIPE, stdout=subprocess.PIPE, stdin=subprocess.PIPE, shell=True, env=os.environ)\r\n stdout, stderr = call.communicate(timeout=30)\r\n log.debug(\"[capture]info of capture,out:{},error:{}\".format(stdout, stderr))\r\n if auto_fix:\r\n img = cv2.imread(file_name)\r\n n1_f = np.sum(img, axis=2) == 0\r\n a_g = np.array([102, 102, 102])\r\n img[n1_f == True] = a_g\r\n cv2.imwrite(file_name, img)\r\n return file_name", "def gen_test_output(sess, logits, keep_prob, image_pl, screen, image_shape):\n image = scipy.misc.imresize(screen, image_shape)\n im_softmax = sess.run([tf.nn.softmax(logits)],{keep_prob: 1.0, image_pl: [image]})\n im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])\n segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)\n mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))\n mask = scipy.misc.toimage(mask, mode=\"RGBA\")\n street_im = scipy.misc.toimage(image)\n street_im.paste(mask, box=None, mask=mask)\n street_im = np.array(street_im)\n\n return street_im", "def saveView(self, filename):\n\n pixmap = self.grab();\n ok = pixmap.save(filename)\n if not ok:\n raise IOError('Unable to save image ' + filename)", "def set_screen(self, size):\r\n self.screen = size", "def save_file(self, _filename):\n imgsize = (self.__resolution[0], self.__resolution[1])\n print imgsize\n\n if(self.__resolution[2] == 1):\n # grayscale -> convert to RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n # duplicate the channels\n ucharcol = (255 * col[0], 255 * col[0], 255 * col[0])\n 
img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 3):\n # RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n ucharcol = (255 * col[0], 255 * col[1], 255 * col[2])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 4):\n # RGBA\n bg_white = (255, 255, 255, 255)\n img = Image.new(\"RGBA\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = 255 * self.get_color((x, y))\n ucharcol = (int(col[0]), int(col[1]), int(col[2]), int(col[3]))\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n else:\n raise StandardError, ('supported number of channels are 1, 3, and 4, only.')\n\n img.save(_filename)", "def screenshot(self):\n self.context.draw.window.screenshot(self.filename)", "def save_image(image):\n if config['save_images']['enabled']:\n directory = config['save_images']['destination']\n filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S%f\") + '.jpg'\n destination = os.path.join(directory, filename)\n logging.debug('saving image to %s', destination)\n f = open(destination, 'wb')\n f.write(image)\n f.close", "def measure_screen(self):\n outputs = self._get_images()\n\n if self.save_image_flag:\n self.save_images(outputs)\n return outputs", "def save_image(self, file_obj):\n manager = pyglet.image.get_buffer_manager()\n colorbuffer = manager.get_color_buffer()\n\n # if passed a string save by name\n if hasattr(file_obj, 'write'):\n colorbuffer.save(file=file_obj)\n else:\n colorbuffer.save(filename=file_obj)", "def _captureScreen(self, theEmulator, theRawFile, theDestFile, theFfmpeg):\n self.log.info('Capture Screen...')\n theEmulator.captureScreen(theDestFile)\n #theEmulator.captureScreenData(theRawFile)\n #if not os.path.exists(theRawFile):\n # self.log.info('Raw file %s does\\'nt exist! Cann\\'t use ffmpeg!')\n #else:\n # self.log.info('Convert raw file to png')\n # self.screenDataToPNG(theRawFile, theDestFile, theFfmpeg)", "def test_save_jpg():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.jpg', 'data': [img]}\n\n assert images.save(parameters)", "def step(machine, screen):\n op = machine.step()\n if op[1].startswith('DRAW') or op[1].startswith('CLS'):\n size = screen.get_size()\n image = Image.frombuffer('L', (64, 32), machine.framebuffer)\n image = ImageOps.colorize(image, '#111', '#0a0')\n image = image.resize(size, resample=Image.BOX)\n frame = pygame.image.frombuffer(image.tobytes(), size, 'RGB')\n screen.blit(frame, (0, 0))", "def draw(self, screen):", "def take_picture(self):\n self.drone.take_picture()", "def take_picture(self):\n self.drone.take_picture()", "def snapshot(filename=None, msg=\"\", quality=None, max_size=None):\n if not quality:\n quality = ST.SNAPSHOT_QUALITY\n if not max_size and ST.IMAGE_MAXSIZE:\n max_size = ST.IMAGE_MAXSIZE\n if filename:\n if not os.path.isabs(filename):\n logdir = ST.LOG_DIR or \".\"\n filename = os.path.join(logdir, filename)\n screen = G.DEVICE.snapshot(filename, quality=quality, max_size=max_size)\n return try_log_screen(screen, quality=quality, max_size=max_size)\n else:\n return try_log_screen(quality=quality, max_size=max_size)", "def live():\n m = camera.status.mode\n print \"Hit ^C to exit.\"\n print \"NOTE! 
After using this command, type: mode('%s') \" % m\n mode('centre')\n try:\n while True:\n f = camera.GetFits()\n camera.status.update()\n setheaders(f)\n camera.status.lastact = time.time() #Record the time that the last image was taken\n xpa.displayimage(f)\n except KeyboardInterrupt:\n logger.error(\"Live mode aborted, dumping image.\")\n finally:\n mode(m) #Restore original camera mode (hopefully)", "def take_screenshot(browser, test_name):\n screenshot_file_path = \"screenshots/{}.png\".format(test_name)\n browser.save_screenshot(screenshot_file_path)", "def dump_image(image, path_image):\n cv2.imwrite(path_image, image)\n return", "def set_test_mode(self):\n self._test_mode = True\n self._wins = 0\n self._draws = 0\n self._count = 0\n self._losses = 0", "def grabScreenshot(self):\n\n self.griddButton.setVisible(True)\n self.mirrorButton.setVisible(True)\n self.blurButton.setVisible(True)\n self.display1Button.setVisible(True)\n self.display2Button.setVisible(True)\n self.tutorialLabel.setVisible(False)\n\n print (\"Grabbing Screenshot\")\n print (\"Showing Buttons now\")\n\n with mss() as sct:\n monitor = sct.monitors[1]\n sct_img = sct.grab(monitor)\n # Convert to PIL/Pillow Image\n screenshots = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')\n screenshots.save(self.firstScreen, \"PNG\")\n\n # 2nd Display Screen shot\n\n monitor = sct.monitors[2]\n sct_img = sct.grab(monitor)\n # Convert to PIL/Pillow Image\n screenshots = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')\n screenshots.save(self.secondScreen, \"PNG\")\n self.photo.setPixmap(QtGui.QPixmap(self.firstScreen))\n self.statustext.setText(\"Added display 1 as work display for now\")\n self.ActivePhoto = \"Screenshot1.png\" # Set Photo as display 1 so we dont get callstack error when mirrroring", "def save(self):\n print(\"Clicked S(ave)\")\n saved_tiles = []\n for tile in self.tiles.sprites():\n # Append tiles pos to correct list if tile is occupied\n if not tile.is_available:\n tiles_attr = {\"type\": tile.tile_type, \"pos\": tile.rect.topleft}\n saved_tiles.append(tiles_attr)\n save_tiles(saved_tiles, lvl=\"02\")\n print(saved_tiles)\n # Flash white screen when level is saved\n self.surface.fill(s.WHITE)\n pygame.display.flip()\n pygame.time.wait(100)\n print(\"Saved\")", "def write_image(out, frame):\n if not os.path.exists(out):\n os.makedirs(out)\n now = datetime.now() \n dt_string = now.strftime(\"%H-%M-%S-%f\") \n filename = f'{out}/{dt_string}.png'\n logging.info(f'write image {filename}')\n cv2.imwrite(filename, frame)", "def DumpImageRAMForSimulation(sim, testcircuit, scope, imgSrc, pxPerClock,\n validityChecker, dstPath = None):\n imgData = loadImage(imgSrc, pxPerClock)\n sim.set_value(testcircuit.input_ren, [0], scope)\n sim.set_value(testcircuit.output_ren, [1], scope)\n sim.evaluate()\n imgResult = bitarray(endian='little')\n for i in range(imgData.numRows):\n imgResult.extend(sim.get_value(testcircuit.output_rdata, scope))\n bitsStartIndex = i * imgData.bitsPerRow\n bitsEndIndex = (i + 1) * imgData.bitsPerRow\n assert validityChecker(imgData, i, imgResult[bitsStartIndex:bitsEndIndex])\n sim.evaluate()\n sim.advance_cycle()\n sim.evaluate()\n if dstPath is not None:\n image = Image.frombytes(mode=\"RGB\", size=(10,10), data=imgResult.tobytes())\n image.save(dstPath)", "def get_screen():\n img_title = 'screen_' + g.client_id + '.png'\n image_path = STATIC_FILES_PATH + img_title\n if g.driver_status != WhatsAPIDriverStatus.LoggedIn:\n try:\n g.driver.get_qr(image_path)\n return 
send_file(image_path, mimetype='image/png')\n except Exception as err:\n pass\n g.driver.screenshot(image_path)\n return send_file(image_path, mimetype='image/png')", "def _save_screenshot_callback(self, _):\n\n self._curr_image_inc += 1\n image = self._screenshot_func()\n print(\"Captured image of shape\", np.shape(image))\n print(\"Current number of images:\", self._curr_image_inc)\n\n image.save(os.path.join(self._image_path, str(self._curr_image_inc) + '.png'))", "def getScreenAsImage():\n\treturn _getRectAsImage(None)", "def drawScreen(screen):\n screen.fill(BLACK) # Fill the screen with black.\n \n\n # Flip the display so that the things we drew actually show up.\n pygame.display.flip()", "def test_save_tif():\n img = Image.new('RGB', (10, 20))\n\n parameters = {'path': 'green-dot.tif', 'data': [img]}\n\n assert images.save(parameters)", "def store_results(image, table, cueballs):\n global CURRENT_FRAME, out_file\n #image.save('results/%d.jpg' % frame_no)\n if cueballs:\n best_ball = cueballs[0]\n if best_ball.confirmed:\n pygame.image.save(image, here('results/%d.jpg' % CURRENT_FRAME))\n out_file.write(\"%d %d %d\\n\" % (CURRENT_FRAME, best_ball.x, best_ball.y))", "def save_to_buffer(self) -> io.BytesIO:\n image = get_screenshot_as_png(self._layout)\n buffer = io.BytesIO()\n image.save(buffer, \"png\")\n return buffer", "def render_save(scene, cam, globalIdx, trajDir, camDir, NI=1280, NJ=720):\n #render image/convert to bimg\n expimg = scene.render(cam, NI, NJ);\n bimg = convert_image(expimg); \n exp_fname = trajDir + \"/exp_%(#)06d.png\"%{\"#\":globalIdx};\n save_image(bimg, exp_fname); \n\n #save cam\n cam_name = camDir + \"/cam_%(#)06d.txt\"%{\"#\":globalIdx}\n save_perspective_camera(cam, cam_name)\n remove_from_db([cam, expimg, bimg])", "def save(self, filename):\n print(\"Saving...\", end=\"\\r\")\n canvas = self.canvas[self.N:self.S,self.W:self.E]\n cv2.imwrite(\"./Output/\"+filename, canvas)\n print(\"Saved:\",filename)", "def saveImage(turtle, filename):\n ts = turtle.getscreen()\n tc = ts.getcanvas()\n tc.postscript(file=filename)", "def saveImage(self, observation):\n image_path = \"{}/{}/frame{:06d}\".format(self.data_folder, self.episode_folder, self.episode_step)\n relative_path = \"{}/{}/frame{:06d}\".format(self.name, self.episode_folder, self.episode_step)\n self.images_path.append(relative_path)\n # in the case of dual/multi-camera\n if observation.shape[2] > 3:\n observation1 = cv2.cvtColor(observation[:, :, :3], cv2.COLOR_BGR2RGB)\n observation2 = cv2.cvtColor(observation[:, :, 3:], cv2.COLOR_BGR2RGB)\n\n cv2.imwrite(\"{}_1.jpg\".format(image_path), observation1)\n cv2.imwrite(\"{}_2.jpg\".format(image_path), observation2)\n else:\n observation = cv2.cvtColor(observation, cv2.COLOR_BGR2RGB)\n cv2.imwrite(\"{}.jpg\".format(image_path), observation)", "def save_image(name, image):\n image_name = 'output/' + name + '.png'\n cv2.imwrite(image_name, image)", "def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n 
self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)", "def draw(self, screen):\n if self.state == self.S_ACTIVE:\n screen.blit(self.image, self.rect)", "def _get_screen(self, cache=False, zone=None, screen=None):\n area = zone\n\n if screen is not None:\n img = screen\n elif cache:\n if self.cache_screenshot is None:\n raise errors.CacheError\n img = self.cache_screenshot\n else:\n return self._get_screenshot(area)\n\n if area is not None:\n return img.crop((area[0], area[1], area[0] + area[2], area[1] + area[3]))\n else:\n return img", "def screenshot(self, name):\n screenshot_name = str(self.screenshot_count) + \"_\" + name + \".png\"\n self.log(\"Taking screenshot: \" + screenshot_name)\n # on Android, switching context to NATIVE_APP for screenshot\n # taking to get screenshots also stored to Testdroid Cloud\n # device run view. After screenshot switching back to\n # WEBVIEW. Works ok for Safari too.\n orig_context = self.driver.current_context\n self.driver.switch_to.context(\"NATIVE_APP\")\n self.driver.save_screenshot(self.screenshot_dir + \"/\" + screenshot_name)\n # only change context if originally context was WEBVIEW\n if orig_context not in self.driver.current_context:\n self.driver.switch_to.context(orig_context)\n self.screenshot_count += 1", "def draw_end(self, screen):\n screen.fill(BLACK) \n game_over_pic = pygame.transform.scale(pygame.image.load('game_over_mushroom.jpg').convert(), [350, 350])\n screen.blit(game_over_pic, (SCREEN_W_MID-175, SCREEN_H_MID-175))", "def save_image(path, image): \n if len(image.shape) == 4:\n image = image.reshape((image.shape[1], image.shape[2], image.shape[3]))\n image = np.clip(image * 255.0, 0, 255).astype(np.uint8)\n skimage.io.imsave(path, image)", "def save_display_tile(tile, save=True, display=False):\n tile_pil_img = tile_to_pil_tile(tile)\n\n if save:\n t = Time()\n img_path = slide.get_tile_image_path(tile)\n dir = os.path.dirname(img_path)\n if not os.path.exists(dir):\n os.makedirs(dir)\n tile_pil_img.save(img_path)\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile\", str(t.elapsed()), img_path))\n\n if display:\n tile_pil_img.show()", "def camera_save_image(filename):\n image = camera_acquire_image()\n image.save(filename)", "def _save(filename, img):\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n # filename = filename+'.png'\n filename = os.path.join(OUTPUT_DIR, filename)\n # print(filename, img.shape)\n cv.imwrite(filename, img)", "def __save_screenshot(path=\".\", file_name=\"current_time_stamp\", file_extension=\"png\"):\n\n # create folder for the screenshot to be saved\n try:\n os.makedirs(path, exist_ok=False)\n except FileExistsError as e:\n pass\n except BaseException as e:\n print(f\"unable to create folder {path}\")\n print(e)\n\n # generate filepath for the screenshot to be saved\n try:\n # generate the filename as current timestamp if not input\n if file_name == \"current_time_stamp\":\n _datenow = str(datetime.datetime.now())\n file_name = _datenow.replace(\" \", \"-\").replace(\":\", \"-\").replace(\".\", \"-\")\n\n _file_path = os.path.join(path, file_name + \".\" + file_extension)\n except BaseException as e:\n print(f\"unable to generate the file path\")\n print(e)\n\n # save the screenshot\n try:\n gui.screenshot(_file_path)\n return _file_path\n except BaseException as e:\n print(f\"unable to save the screenshot\")\n print(e)", "def expose(self):\n if self.camera is None: # test mode -- immediately return test image\n print(\"NO 
SPECTRAL CAMERA FOUND -- USING TEST DATA\")\n self.filename = \"example_fits_files/Mooi\"\n return\n\n exposure_time = self.time.get()\n try:\n self.exposure_time = float(exposure_time)\n except:\n message = \"Exposure time \\\"{0}\\\" cannot be converted to floating point number\".format(exposure_time)\n messagebox.showerror(\"Error\", message)\n raise ValueError(message)\n filename = \"spectra/{0}\".format(timestamp())\n self.camera.spectrum(self.exposure_time, filename)\n self.filename = filename" ]
[ "0.65687513", "0.64645797", "0.62103873", "0.6199405", "0.6150051", "0.6116136", "0.6103849", "0.6095868", "0.60331243", "0.6008139", "0.5992217", "0.5990289", "0.59354466", "0.5926195", "0.58971107", "0.5877742", "0.58629024", "0.5829966", "0.58243024", "0.57998353", "0.57982475", "0.57810163", "0.57530606", "0.5729971", "0.5725155", "0.5725155", "0.5699817", "0.5695005", "0.5672938", "0.5672938", "0.5671683", "0.5654909", "0.56535983", "0.56475836", "0.56395966", "0.56181866", "0.5614872", "0.5597766", "0.55864215", "0.55774796", "0.55590063", "0.5557919", "0.5554315", "0.55492455", "0.554735", "0.5514826", "0.5514786", "0.5510418", "0.5496859", "0.5496253", "0.54898953", "0.54870236", "0.5483024", "0.5481066", "0.5467943", "0.54663324", "0.5462866", "0.5460545", "0.54549265", "0.54488367", "0.5445773", "0.5435655", "0.543158", "0.54239994", "0.54230523", "0.5402132", "0.53987956", "0.53987956", "0.5391414", "0.5389772", "0.5363284", "0.53569", "0.5354478", "0.53465986", "0.5344175", "0.53435665", "0.5332945", "0.53268427", "0.532581", "0.5321583", "0.5319427", "0.53045213", "0.53024006", "0.52968997", "0.529513", "0.5291005", "0.5289435", "0.528459", "0.5272691", "0.5266484", "0.52612865", "0.52606297", "0.5258911", "0.5250437", "0.524567", "0.52403444", "0.5236588", "0.52336955", "0.5226713", "0.5221028" ]
0.7221238
0
Check 'frames' folder exists. If not, create it
def check_folder(directory):
    global path_checked
    if not os.path.exists(directory):
        os.makedirs(directory)
    else:
        path_checked = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_keyframes_from_frames(frames_dir, keyframes, name_dir=\"keyframes\", remove_frames_dir=True):\n # Create new directory if it doesn't already exist\n keyframes_dir = make_dir(name_dir, Path(frames_dir).parent)\n\n # Copy keyframes from frames directory, only if destiny directory is empty\n if len(os.listdir(keyframes_dir)) == 0:\n for i in keyframes:\n filename = Path(str(i)).with_suffix(\".jpg\")\n shutil.copyfile(frames_dir / filename, keyframes_dir / filename)\n\n print(\"Keyframes successfully extracted.\")\n else:\n print(f\"!!! The output directory '{keyframes_dir.name}' is not empty. Keyframes were not saved. !!!\")\n\n # Remove frames folder\n if remove_frames_dir:\n print(\"Removing frames ...\")\n shutil.rmtree(frames_dir)", "def if_exists(folder):\n if os.path.exists(os.path.join(BOT_FOLDER, folder)):\n return os.path.join(BOT_FOLDER, folder)\n else:\n os.mkdir(os.path.join(BOT_FOLDER, folder))\n return os.path.join(BOT_FOLDER, folder)", "def remove_old_frames():\n for filename in os.listdir(FRAMESDIR):\n file_path = os.path.join(FRAMESDIR, filename)\n try:\n if (os.path.isfile(file_path)\n and filename.endswith('.html')):\n os.unlink(file_path)\n except Exception as e:\n failure = 'Failed to delete {}'.format(file_path)\n print('{} {}'.format(failure, e))\n logger.warning(failure)\n return None", "def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")", "def _check_dirs(self):\r\n for dir in [self.papers_dir,\r\n self.buffer_dir]:\r\n if not os.path.exists(dir):\r\n message = f'Dir not exists: {dir}. Making it.'\r\n logging.warning(message)\r\n os.mkdir(dir)", "def check_if_exists(self): \r\n dir_name = os.path.dirname(os.path.abspath(__file__))\r\n fucntion_dir = os.path.join(dir_name, 'openfaas', self.name)\r\n if not os.path.isdir(fucntion_dir):\r\n raise ValueError(\r\n f\"Function name `{self.name}` provided does not exist.\")\r\n self.yaml_path = os.path.join(fucntion_dir, f\"{self.name}.yml\")\r\n return True", "def check_already_extracted(video_parts):\n train_or_test, filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join('demo_frames',\n filename_no_ext + '-0001.jpg')))", "def checking_path():\n path = Path(\"phonebook\")\n try:\n path.mkdir(parents=True, exist_ok=False)\n except FileExistsError:\n pass\n else:\n pass", "def check_folder(filepath):\n if not os.path.exists(filepath):\n os.mkdir(filepath)\n return filepath", "def _check_or_create_dir(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)", "def test_ensure_dir_exists(self):\n pass", "def clean_frames(self):\n for fn in os.listdir(self.frame_directory):\n if fn.endswith(\".png\") and fn in self.frame_fns:\n os.remove(fn)", "def create_folder_if_needed(path):\n if os.path.exists(path):\n print(\"{} dir exists\".format(path))\n else:\n print(\"{} dir does not exist. 
Creating dir.\".format(path))\n os.mkdir(path)", "def _create_folder_if_not_exist(filename):\n os.makedirs(os.path.dirname(filename), exist_ok=True)", "def create_dir():\n if check_dir_exist():\n return False\n else:\n os.makedirs(path_structure)\n return True", "def chkPath(fullPath: str) -> None:\n\n # Check if path already exist.\n p = os.path.split(fullPath)\n exists = os.path.exists(p[0])\n # If not then create it.\n if exists == False:\n try:\n os.makedirs(p[0])\n except:\n print(\"Failed to create requested path.\")", "def create_folder(target_folder):\n try:\n os.makedirs(target_folder)\n except OSError as e:\n pass\n return os.path.exists(target_folder)", "def create_folder(target_folder):\n try:\n os.makedirs(target_folder)\n except OSError as e:\n pass\n return os.path.exists(target_folder)", "def check_dir(dir):\n if not os.path.exists(dir):\n print(\"[+] Creating directory for target..\")\n os.makedirs(dir)", "def prepare_folders():\n folder_list = [\"./data\", \"./data/stage\", \"./data/spoken\", \"./data/stage_lemmas\", \"./data/spoken_lemmas\"]\n for folder in folder_list:\n if not os.path.exists(folder):\n os.mkdir(folder)\n print(f\"Created folder {folder}\")\n else:\n print(f\"Folder {folder} already existed\")", "def check_build_exists(self):\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\"Build Directory Already Exist, please run update_specs_build\")", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def _check_path(path):\n os.system(\"if [ ! -d \" + path + \" ]; then mkdir -p \" + path + \"; fi\")", "def check_if_upload_folders_exist():\r\n\timport os\r\n\tfrom django.conf import settings\r\n\r\n\tif not os.path.exists(settings.MEDIA_ROOT):\r\n\t\tos.makedirs(settings.MEDIA_ROOT)\r\n\t\tprint(\"Created folder \" + settings.MEDIA_ROOT)\r\n\r\n\tif not os.path.exists(settings.EMAIL_FILE_PATH):\r\n\t\tos.makedirs(settings.EMAIL_FILE_PATH)\r\n\t\tprint(\"Created folder \" + settings.EMAIL_FILE_PATH)", "def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(markovDir)\r\n except FileExistsError:\r\n pass", "def check_dir_exist(scheme):\n if os.path.exists(scheme.prefix) is False:\n print(\"ERROR: Required directory '{}' is missing! 
Exiting!\").format(scheme.prefix)\n sys.exit(1)", "def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)", "def check_create_folder(self, output, *folder_names):\n path = self.video_file\n\n # if none then create diectory on same level as video directory with the folder_name and video name\n if output is None:\n output = os.path.abspath(os.path.join(os.path.dirname(path), os.pardir, *folder_names))\n else:\n output = os.path.join(output, self.video_name)\n\n # if directory not existing create directory\n if not os.path.exists(output):\n print('created new directory: ', output)\n os.makedirs(output)\n\n return output", "def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already", "def check_folder(directory):\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n print(\"making pictures folder\")\r\n print()\r\n else:\r\n print(\"pictures folder already exists\")\r\n print()", "def create_project_folder(self):\n\t\tif not os.path.exists(self.segment_path):\n\t\t\tfileutil.makedirs(self.segment_path)", "def save_frame(frame):\n try:\n img = Image.fromarray(frame.array, 'RGB')\n out_path = settings['app']['web_path']\n if not os.path.isabs(out_path):\n out_path = os.path.join(basepath, out_path)\n filename = os.path.join(out_path, 'static', 'latest.jpg')\n tmp_filename = '{}.part'.format(filename)\n img.save(tmp_filename, 'jpeg')\n os.rename(tmp_filename, filename)\n except Exception, error:\n print('Error saving frame: {}'.format(error))", "def isVideoFolder():", "def check_file(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_directories(self, app_label):\n for folder_name in [\"views\", \"urls\", \"templates/%s\" % app_label]:\n directory_path = \"%s/%s\" % (app_label, folder_name)\n if not os.path.exists(directory_path):\n os.makedirs(directory_path)", "def check_make(folder_check):\n if not os.path.isdir(folder_check):\n os.mkdir(folder_check)", "def _check_app_dir():\n if not os.path.exists(os.path.expanduser('~/.config/scheduler')):\n os.mkdir(os.path.expanduser('~/.config/scheduler'))", "def add_missing_dirs(self, index_file):\n if not os.path.exists(os.path.dirname(index_file)):\n os.system('echo \" -- Create missing \"' + index_file)\n os.makedirs(os.path.dirname(index_file))\n return True", "def ensure_dirs(cls, folder_path):\n try:\n cls.mkdirs(folder_path)\n except exceptions.PlotlyRequestError as e:\n if \"already exists\" in e.message:\n pass\n else:\n raise e", "def _check_directory(my_folder):\n if not os.path.exists(my_folder):\n os.makedirs(my_folder)", "def make_sure_path_exists(out_path):\n try:\n os.makedirs(out_path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n print \"Errors in output folder path! 
please change the output path or analysis name\\n\"\n exit()", "def check_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_test_folder_if_does_not_exist(path):\n print('')\n if os.path.exists(path):\n print(' Skip creation of existing folder: {}'.format(path))\n else:\n print(' Create non-existing test folder: {}'.format(path))\n os.makedirs(path, mode=0o775)", "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def mkdir(folder_name: str) -> None:\n if exist(folder_name):\n print(\"The folder is already exist\")\n return \n\n os.mkdir(folder_name)", "def create_base_projects_folder():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n os.mkdir(context.__PROJECTS_PATH__, mode=0o777)\n log.info(\"Base folder '.wcscanner' created in %s\", context.__BASE_PATH__)\n else:\n log.info(\"Base folder '.wcscanner' already in %s\", context.__BASE_PATH__)", "def is_valid_animation(path, verbose=True):\n try:\n if \"idle\" in os.listdir(path) or \"transition\" in os.listdir(path):\n return True\n else:\n if verbose:\n print(path, \"is not a valid animation folder! It needs an /idle or /transition folder!\")\n return False\n except:\n return False", "def check_if_anim_exist(name, ext=vext, figpath=figpath):\n return not(os.path.isfile(format_filename(name, ext, figpath)))", "def findframe(startdir,camera,grating,filename):\n grating = re.sub(\"\\/\",\"_\",grating)\n gdir = camera + grating\n if os.path.isfile(os.path.join(startdir,filename)):\n return(startdir)\n else:\n # now the grating directory\n if os.path.isfile(os.path.join(startdir,gdir,filename)):\n return(os.path.join(startdir,gdir))\n poswavedirs = glob.glob(os.path.join(startdir,gdir) + '/w*')\n wavedirs = []\n\n for pwd in poswavedirs:\n if re.search(\"w\\d+\",pwd):\n wavedirs.append(pwd) \n\n\n for wd in wavedirs:\n # alright, now we search the subdirs\n if os.path.isfile(os.path.join(wd,filename)):\n return(wd)\n\n return(None)", "def process(self):\n try:\n existing_movie = os.listdir(self.movie_root_path)\n except:\n os.makedirs(self.movie_root_path)\n existing_movie = os.listdir(self.movie_root_path)\n\n if not [movie for movie in existing_movie if self.title.lower() in movie.lower()]:\n if self.has_video_extension:\n self.move_media()\n else:\n self.extract_media()\n else:\n Logger.log('[-] %s already exists. Skipping...' 
% self.title)", "def ensure_dir( dirName ):\r\n if not os.path.exists( dirName ):\r\n os.makedirs( dirName )", "def mkdir_if_missing(d):\n if not os.path.exists(d):\n os.makedirs(d)", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def _directory(self):\n dir = self.target\n\n if not os.path.exists(dir):\n return os.makedirs(dir)\n return True", "def _create_folders(self):\n if not os.path.exists(os.path.join(BASE_DIR, DIR)):\n os.mkdir(os.path.join(BASE_DIR, DIR))\n directory = os.path.join(BASE_DIR, DIR, self.title)\n if not os.path.exists(directory):\n os.mkdir(directory)\n return directory", "def check_directories(root_directory):\n\n camdir = root_directory + \"/cam\"\n\n if not os.path.isdir(camdir):\n os.mkdir(camdir)\n\n if not os.path.isdir(camdir + \"/cfg\"):\n os.mkdir(camdir + \"/cfg\")\n\n if not os.path.isdir(camdir + \"/img\"):\n os.mkdir(camdir + \"/img\")\n\n if not os.path.isdir(camdir + \"/res\"):\n os.mkdir(camdir + \"/res\")\n\n if not os.path.isdir(camdir + \"/tmp\"):\n os.mkdir(camdir + \"/tmp\")", "def make_empty_directories_linux() -> None:\n mkdir(PICTURES_DIR / 'screenshots' / 'grim')\n mkdir(PICTURES_DIR / 'screenshots' / 'swappy')", "def assure_path_exists(self, path):\n\n dir = os.path.dirname(path)\n if not os.path.exists(dir):\n os.makedirs(dir)", "def test_create_already_exists():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n model_file = os.path.join(misc_folder, \"model.tar\")\n create.main(\"mlp\", \"10:12:8\", model_file)\n # TODO: Check if error was logged", "def check_basedir_writeable():\n if MEDLEYDB_PATH is None:\n raise EnvironmentError(\n \"The environment variable MEDLEYDB_PATH must be set \"\n \"to use the download module.\"\n )\n\n if not os.path.exists(MEDLEYDB_PATH):\n try:\n os.mkdir(MEDLEYDB_PATH)\n except:\n raise EnvironmentError(\n \"The value set for the MEDLEYDB_PATH does not exist and \"\n \"cannot be created.\"\n )\n\n if not os.path.exists(AUDIO_PATH):\n os.mkdir(AUDIO_PATH)\n\n global BASEDIR_WRITEABLE\n BASEDIR_WRITEABLE = True\n return True", "def generate_video_from_frames(path_to_frames, title):\r\n mean_height = 0\r\n mean_width = 0\r\n num_of_images = load_one_setting(settings_filename, 'MAX_CYCLES')\r\n os.chdir(path_to_frames)\r\n '''Loading all frames'''\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = Image.open(file)\r\n width, height = im.size\r\n mean_width += width\r\n mean_height += height\r\n\r\n mean_width = int(mean_width / num_of_images)\r\n mean_height = int(mean_height / num_of_images)\r\n\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = Image.open(file)\r\n imResize = im.resize((mean_width, mean_height), Image.ANTIALIAS)\r\n imResize.save(file, 'JPEG', quality=95)\r\n release_video(title)\r\n os.chdir(r'../..')", "def create_app_folders(self):\n\t\tif not os.path.exists(self.TEMP_FOLDER):\n\t\t\tos.makedirs(self.TEMP_FOLDER)\n\t\tif not os.path.exists(self.SAVE_FOLDER):\n\t\t\tos.makedirs(self.SAVE_FOLDER)", "def create_dataset_folder_structure():\n\n path = Path(f'{DATASETS}/{FEATURES_DATASET}')\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. 
Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n\n try:\n for path in new_sensor_paths:\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n else:\n print(\"\\nPath already exists!\")\n except:\n return False\n else:\n return True", "def create_dirs(self):\n for new_directory in [self.event_dir, self.event_dir / 'videos']:\n new_directory.mkdir(exist_ok=self.overwrite)\n logger.debug('Dir {} created', new_directory)", "def test_existing_current_files(self):\n\n m = Mothur(**self.init_vars)\n self.set_current_dirs(m)\n m.summary.seqs(fasta='test_fasta_1.fasta')\n m.summary.seqs()\n\n return", "def ensure_path_exists(filename):\n targetdir = dirname(expanduser(filename))\n if exists(targetdir):\n return\n os.makedirs(abspath(targetdir))", "def _check_save_directory_path(self):\n if self.save_directory_path is not None:\n if os.path.exists(self.save_directory_path):\n raise ValueError(\n 'You must provide non-existing save output directory, '\n '{} given.'.format(self.save_directory_path))\n else:\n os.makedirs(self.save_directory_path)", "def checkPath(self, filename):\r\n if (not os.path.exists(filename)):\r\n filename = os.getenv('MDLROOT')+'/'+filename\r\n if (not os.path.exists(filename)):\r\n print \"[MDL] ERROR, FILE\", filename, \"DOES NOT EXIST.\"\r\n sys.exit(1)\r\n return filename", "def new_dir(the_dir):\n try:\n os.makedirs(the_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass #not a problem if file exists", "def checkExists(pd_dir):\n\n if os.path.isfile(pd_dir):\n print(\"Fatal error: need to store settings in {0}, but there is a file with that name\".format(pd_dir))\n exit(1)\n\n\n if not os.path.isdir(pd_dir):\n\n\n print(\"\"\"\n == Pre-release client for planning.domains ==\n\n This is pre-release software, for accessing the content on\n api.planning.domains. 
It is released without warranty\n (including the implied warranties of merchantability\n or fitness for a particular purpose).\n\n Send bug reports to Andrew Coles (andrew.coles@kcl.ac.uk)\n or Christian Muise (christian.muise@gmail.com)\n\n\"\"\")\n\n\n print(\"Making directory {0}...\\n\".format(pd_dir))\n try:\n os.mkdir(pd_dir)\n\n except OSError:\n print(\"Cannot make directory\")\n exit(1)", "def ensure_data_folder_existence() -> None:\n folder_name = params.DATA_FOLDER_NAME\n if not folder_name in os.listdir('.'):\n os.mkdir(folder_name)", "def mkdir_if_not_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def _createRootAndExperimentFolder(self):\n\n # This should not happen\n if self._rootExportPath == \"\":\n return False\n\n # Make sure that the experiment folder does not already exist\n expPath = self._rootExportPath\n\n # Does the folder already exist?\n if os.path.exists(expPath):\n counter = 1\n ok = False\n while not ok:\n tmpPath = expPath + \"_\" + str(counter)\n if not os.path.exists(tmpPath):\n expPath = tmpPath\n ok = True\n else:\n counter += 1\n\n # Update the root and experiment paths\n self._rootExportPath = expPath\n self._experimentPath = os.path.join(self._rootExportPath,\n self._experimentName)\n\n # Create the root folder\n self._createDir(self._rootExportPath)\n\n # And now create the experiment folder (in the root folder)\n self._createDir(self._experimentPath)\n\n # Return success\n return True", "def CheckDir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)", "def get_frame_directory(self):\n return self.frame_directory", "def ensure_exists(output_dir):\n try:\n makedirs(output_dir)\n except OSError:\n if not isdir(output_dir):\n raise", "def ensure_path(full_path):\n full_path = Path(full_path)\n if not full_path.exists():\n full_path.mkdir(parents=True, exist_ok=True)", "def check_path(dir_path):\n if not os.path.exists(dir_path):\n os.mkdir(dir_path, 0755)", "def check_outpath(self, outpath):\n if not os.path.isdir(outpath+str(self.ar_no)):\n ar_outpath = os.path.join(outpath,str(self.ar_no))\n ar_outpath_video = os.path.join(outpath,str(self.ar_no)+'_video')\n os.makedirs(ar_outpath)\n os.makedirs(ar_outpath_video)\n print(\"Path does not exist, create: \")\n print(ar_outpath)\n print(ar_outpath_video)", "def check_dir(path):\n \n if not os.path.exists(path):\n os.makedirs(path)\n print path", "def check_path(fp):\n if not Path(fp).exists():\n\n if len(Path(fp).suffix) > 0: # check if file\n Path(fp).parent.mkdir(exist_ok=True, parents=True)\n\n else: # or directory\n Path(fp).mkdir(exist_ok=True, parents=True)", "def writeFrames(frames, name, test_name):\n openFile = lambda path: open(os.path.join(*path), 'w')\n path = ('data', 'json', test_name, name + '.js')\n try:\n out_file = openFile(path)\n except:\n # Path may be different, depending on where we get called from.\n # TODO: Fix this kludge.\n out_file = openFile(('tests', ) + path)\n\n out_file.write(test_name + '_' + name + ' = ' + json.dumps(frames))\n out_file.close()", "def test_raises_when_assignment_tests_directory_is_non_empty(\n self, tmp_path_factory, platform_url, workdir, rtd_path\n ):\n # arrange\n existing_assignment_dir = rtd_path / ASSIGNMENT_NAMES[0]\n existing_assignment_dir.mkdir(parents=True)\n\n # act/assert\n result = run_generate_rtd(\n base_url=platform_url, rtd=rtd_path, workdir=workdir\n )\n\n assert result.status == plug.Status.ERROR\n assert existing_assignment_dir.name in result.msg\n assert \"delete\" in result.msg", "def 
ensure_folder_exists(folder_path: str) -> None:\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)", "def ifnotexistmkdir(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n return Path(directory)", "def prepare_destination(self):\n self.movie_root_path = self.config.share_movie_root_path % (\n self.share_path, self.title)\n\n if os.path.isdir(self.movie_root_path):\n if self.capacity_reached():\n Logger.log(\n '[!] Capacity reached. Skipping adding movie %s.' % self.title)\n else:\n if not os.path.isdir(self.movie_root_path):\n Logger.log('[+] Adding Movie: %s' % self.title)\n os.mkdir(self.movie_root_path)", "def create_folders(folder_name):\n\n if os.path.exists(downloads_path + '\\\\' + folder_name):\n pass\n else:\n os.makedirs(folder_name)\n print(f'Folder: {folder_name} has been created in {downloads_path}')", "def create_folder(self):\n path = os.path.expanduser('~') + \"/.ip_enrich/\"\n # Does it exist already?\n if os.path.isdir(path):\n return True\n try:\n os.mkdir(path)\n return True\n except Exception as e:\n print (f\"Creation of the directory {path} failed\")\n print (f\"Error {e}\")\n return False", "def _is_folder_exists() -> bool:\n\n pwd: str = os.getcwd()\n data_folder: str = os.path.join(pwd, \"data\")\n return os.path.isdir(data_folder)", "def create_dir(cls, relpath):\r\n safe_mkdir(os.path.join(cls.build_root, relpath))", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def create_experiment_folder(path_out, dir_name, name='', stamp_unique=True):\n assert os.path.exists(path_out), 'missing base folder \"%s\"' % path_out\n date = time.gmtime()\n if isinstance(name, str) and name:\n dir_name = '{}_{}'.format(dir_name, name)\n path_exp = os.path.join(path_out, dir_name)\n if stamp_unique:\n path_exp += '_' + time.strftime(FORMAT_DATE_TIME, date)\n path_created = None\n while not path_created:\n logging.warning('particular out folder already exists')\n if path_created is not None:\n path_exp += '-' + str(np.random.randint(0, 100))\n path_created = create_folder(path_exp, ok_existing=False)\n else:\n path_created = create_folder(path_exp, ok_existing=False)\n logging.info('created experiment folder \"%r\"', path_created)\n return path_exp", "def test_filecache_directory_already_exists(self, tmpdir, sess):\r\n url = self.url + ''.join(sample(string.ascii_lowercase, randint(2, 4)))\r\n\r\n # Make sure our cache dir DOES exist\r\n tmp_cache = tmpdir.join('missing', 'folder', 'name').strpath\r\n os.makedirs(tmp_cache, self.cache.dirmode)\r\n\r\n assert os.path.exists(tmp_cache)\r\n\r\n self.cache.directory = tmp_cache\r\n\r\n # trigger a cache save\r\n sess.get(url)\r\n\r\n assert True # b/c no exceptions were raised\r", "def ensure_dir_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def folder_guard(folder_path):\n if not os.path.isdir(folder_path):\n print('INFO:folder_guard(): Creating folder: ' + folder_path + '...')\n os.mkdir(folder_path)", "def mkdir_p(start_path):\n try:\n os.makedirs(start_path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(start_path):\n pass", "def check_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)", "def _ensure_directory(self, dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)", "def create_folder(folder):\n\n try:\n os.mkdir(folder, 0740)\n except OSError:\n return False\n else:\n return 
True", "def ensure_dir(filename):\n d = os.path.dirname(filename)\n if not os.path.exists(d):\n os.makedirs(d)", "def create_directory_structure():\n\n def ensure_directory(path):\n try:\n os.makedirs(path)\n\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n ensure_directory('./out/textures')\n ensure_directory('./out/data')" ]
[ "0.5819628", "0.58033806", "0.57717586", "0.5712013", "0.5633541", "0.5623171", "0.5622375", "0.56030816", "0.56009656", "0.557692", "0.5512916", "0.55081576", "0.55065334", "0.549381", "0.5465493", "0.5456761", "0.54223925", "0.54223925", "0.54124236", "0.5407644", "0.5358195", "0.5302531", "0.5300398", "0.52954054", "0.5277151", "0.52758586", "0.5260074", "0.5255658", "0.5251971", "0.52511144", "0.52419066", "0.52262264", "0.5221715", "0.5215909", "0.5215443", "0.52024156", "0.5185488", "0.51842266", "0.5183366", "0.5176406", "0.5170073", "0.5168676", "0.51676387", "0.5164788", "0.51646376", "0.5162185", "0.51587933", "0.5143026", "0.51343316", "0.5124948", "0.51243573", "0.5108182", "0.5101079", "0.5097787", "0.5091784", "0.5087354", "0.50853485", "0.50725377", "0.5071924", "0.50712645", "0.50646657", "0.50579554", "0.50530815", "0.50522363", "0.50490725", "0.5039673", "0.5034364", "0.5031071", "0.5030446", "0.5029613", "0.5027086", "0.50202686", "0.5019766", "0.50162995", "0.50147706", "0.5013583", "0.5008625", "0.50061333", "0.5005258", "0.50040174", "0.49993822", "0.49965927", "0.49946505", "0.49914706", "0.4988232", "0.49875242", "0.49848616", "0.49844489", "0.49843493", "0.495963", "0.49469453", "0.49350047", "0.4932509", "0.49310866", "0.49308023", "0.49304873", "0.4929268", "0.49229357", "0.491997", "0.49170545", "0.4913762" ]
0.0
-1
Return the FPS, or if video_mode is true, return the video FPS
def get_fps(clock):
    if video_mode:
        return "30"  # Video FPS will be 30
    else:
        return str(int(round(clock.get_fps(), 0)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_video_fps(self):\n fps = self.video.get(cv2.CAP_PROP_FPS)\n logging.info('Video FPS: {}'.format(fps))\n return fps", "def get_fps(video, use_opencv=False):\n\n if use_opencv:\n video_cap = cv2.VideoCapture(video)\n fps = video_cap.get(cv2.CAP_PROP_FPS)\n video_cap.release()\n return fps\n else:\n return convert_to_float(\n utils.exec_shell_command(f\"{SHELL_CMD_GET_FPS} '{video}'\", silent=True)[0]\n )", "def get_fps(self):\n \n return self.fps, self.average_fps", "def get_frame_rate(video):\n\n video = cv2.VideoCapture(video)\n\n # Find OpenCV version\n (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\n\n # With webcam get(CV_CAP_PROP_FPS) does not work.\n # Let's see for ourselves.\n\n if int(major_ver) < 3:\n fps = video.get(cv2.cv.CV_CAP_PROP_FPS)\n logger.info(\"Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}\".format(fps))\n else:\n fps = video.get(cv2.CAP_PROP_FPS)\n logger.info(\"Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}\".format(fps))\n\n cv2.destroyAllWindows()\n video.release()\n return fps", "def get_fps(self):\n raise NotImplementedError(\"get_fps is not implemented\")", "def get_fps(self):\n return self.fps", "def calculate_fps(self):\n time_difference = self.time_array[-1] - self.time_array[0]\n time_difference_in_seconds = time_difference.to_sec()\n if time_difference_in_seconds == 0:\n pass\n self.fps = self.buffer_size / time_difference_in_seconds\n rospy.loginfo(\"[EulerianMotionMagnification] Estimated FPS: \" + str(self.fps) + \" (Measured timespan: \" + str(time_difference_in_seconds) + \"s)\")\n rospy.loginfo(\"[EulerianMotionMagnification] Video array length: \" + str(len(self.video_array)))", "def read_fps(vid_name):\n\n video = cv2.VideoCapture(vid_name)\n fps = video.get(cv2.CAP_PROP_FPS)\n video.release()\n\n return fps", "def get_fps(self):\n return self._num_frames / (datetime.now() - self._start).total_seconds()", "def get_fps(self):\n # Take difference.\n interframe_intervals = np.diff(self.data[\"t\"])\n\n # Inter-frame interval in milliseconds.\n mean_interval = np.mean(interframe_intervals)\n fps = round(1 / (mean_interval / 1000))\n\n return int(fps)", "def get_fps(self):\n if len(self.times) >= 2:\n dif = np.diff(self.times)\n fps = 1. 
/ dif.min()\n # if the FPS crosses 500, do not update it\n if fps <= 500:\n self.fps = fps\n return self.fps\n else:\n return 0.", "def max_fps(self):\n return self._max_fps", "def get_fps(self):\n if not self.count_fps:\n logging.error(\"No FPSCounter set\")\n return None\n return self.fps.get_fps()", "def get_fps(self):\n if not self.count_fps:\n logging.error(\"No FPSCounter set\")\n return None\n return self.fps.get_fps()", "def get_fps(self):\n return self._document.GetFps()", "def fps(self):\n\t\treturn float(len(self.buf)) / (self.buf[-1][0] - self.buf[0][0])", "def get_video_frame_rate(filename):\n clip = VideoFileClip(filename)\n frame_rate = clip.fps\n clip.close()\n return frame_rate", "def getFrameRate(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALVideoRecorder\")\n return self.proxy.getFrameRate()", "def get_fps(self):\n #return int(self._root.knob('fps').getValue())\n return None", "def getFPS(self):\n # load it each time, since this setting is not limited to a single user\n projectSettingsDB = self.loadProjectSettings()\n try:\n fpsValue = projectSettingsDB[\"FPS\"]\n return fpsValue\n except KeyError:\n msg = \"Database Error while reading projectSettings.json\"\n logger.error(msg)\n return None", "def get_video_property(video_path):\n\tvideocap = cv2.VideoCapture(video_path)\n\tframe_count = videocap.get(cv2.CAP_PROP_FRAME_COUNT)\n\tfps = videocap.get(cv2.CAP_PROP_FPS)\n\theight = videocap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n\twidth = videocap.get(cv2.CAP_PROP_FRAME_WIDTH)\n\treturn frame_count, fps, width, height", "def calcFrameRate(self):\n\n tot = 0\n count = 0\n for session in self.sessions:\n for sample in session.samples:\n if not sample.isLoading:\n tot += sample.fps\n count += 1\n if count:\n self.avgFps = tot / count\n self.lowFps = (self.avgFps < 10)\n self.highFps = (self.avgFps > 25)", "def framerate(self, interval=None):\n total_frames = 0.0\n\n # These are times when SF begins compositing.\n vsync_events = self.event_intervals(name='VSYNC-sf', interval=interval)\n if not vsync_events:\n vsync_events = self.event_intervals(name='VSYNC', interval=interval)\n\n for vsync_event_a, vsync_event_b in zip(vsync_events, vsync_events[1:]) : \n frames_presented = len(self.event_intervals('postFramebuffer', \n interval=vsync_event_a.interval))\n # Below required to skip interval when we had nothing to do.\n # As this event 'toggles' every VSYNC when SurfaceFlinger has work\n # to do. If nothing is done (i.e. 
no 'postFramebuffer' events)\n # there was jank in this interval.\n if vsync_event_a.value != vsync_event_b.value and frames_presented:\n total_frames += frames_presented\n \n present_time = self.present_duration(interval=interval)\n return round(total_frames/present_time, 1) if present_time != 0.0 else float('nan')", "def video_and_fps_from_h5py(data_file, video_type='b-mode'):\n\n video_type = video_type.lower()\n if video_type == 'b-mode':\n dataset = data_file['tissue']\n elif video_type == 'tvi':\n dataset = data_file['TVI']\n else:\n raise RuntimeError(\"video_type must be either 'b-mode' or 'tvi'\")\n\n video = dataset['data']\n video = np.transpose(video, [2, 1, 0])\n\n dt = dataset['times'][1] - dataset['times'][0]\n fps = int(1 / dt)\n\n return video, fps", "def framerate(self):\n return self.config.get('framerate', 15)", "def get_framerate(self):\n return self._framerate", "def duration():\r\n elapsed_time, duration = video_time()\r\n return duration", "def read_video_info_cv2(vid_fp: str):\n cap = cv2.VideoCapture(vid_fp)\n total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n total_seconds = int(total_frames / cap.get(cv2.CAP_PROP_FPS))\n cap.release()\n return timedelta(seconds=total_seconds)\n # cap.set(cv2.CAP_PROP_POS_AVI_RATIO, 1)\n # total_ms = cap.get(cv2.CAP_PROP_POS_MSEC)\n # return timedelta(milliseconds=total_ms)", "def gif_fps(self):\n return self._gif_fps", "def get_video_frame_rate(video_filename):\n if not os.path.exists(video_filename):\n raise ValueError(\"%s does not exist\" % video_filename)\n \n probe = ffmpeg.probe(video_filename)\n assert len(probe['streams']) == 1\n \n # Seems to be two ways of coding, not sure which is better\n avg_frame_rate = probe['streams'][0]['avg_frame_rate']\n r_frame_rate = probe['streams'][0]['r_frame_rate']\n assert avg_frame_rate == r_frame_rate\n \n # Convert fraction to number\n num, den = avg_frame_rate.split('/')\n frame_rate = float(num) / float(den)\n \n return frame_rate", "def get_maya_framerate():\n current_unit = pm.currentUnit(time=True, query=True)\n if current_unit == 'game':\n framerate = 15.\n elif current_unit == 'film':\n framerate = 24.\n elif current_unit == 'pal':\n framerate = 25.\n elif current_unit == 'ntsc':\n framerate = 30.\n elif current_unit == 'show':\n framerate = 48.\n elif current_unit == 'palf':\n framerate = 50.\n elif current_unit == 'ntscf':\n framerate = 60.\n elif 'fps' in current_unit:\n \tframerate = float(current_unit.split('fps')[0])\n else:\n pm.currentUnit(time='film')\n framerate = 24.\n\n return framerate", "def video_bitrate(self):\n assert self.has_video\n\n return self.__video_bitrate", "def get_video_params(video_filename):\n \n width, height = get_video_aspect(video_filename)\n frame_rate = get_video_frame_rate(video_filename)\n return width, height, frame_rate", "def current_fps(self, fps: float):\n self.current_fps_label.setText(\"FPS: {0:.2f}\".format(fps))", "def elapsed_time():\r\n elapsed_time, duration = video_time()\r\n return elapsed_time", "def fps(x, y, i):\n\n # Special case for the edges.\n if i < 2:\n return (y[i+1] - y[i]) / (x[i+1] - x[i])\n elif i > len(x) - 3:\n return (y[i] - y[i-1]) / (x[i] - x[i-1])\n\n else:\n h = x[i] - x[i-1]\n f0 = y[i]\n f1 = y[i+1]\n f2 = y[i+2]\n f3 = y[i-1]\n f4 = y[i-2]\n return (-f2 + 8*f1 - 8*f3 + f4) / (12 * h)", "def set_fps(self, fps=25):\n raise NotImplementedError(\"set_fps is not implemented\")", "def show_fps(self, screen):\n fps = self.clock.get_fps()\n self.pgtext.display_text(\"FPS: {0:.2f}\".format(fps), screen, 600, 
10)", "def display_fps(self):\n template = \"{} - FPS: {:.2f}\"\n caption = template.format(prepare.CAPTION, self.clock.get_fps())\n pg.display.set_caption(caption)", "def video_bitrate(self):\n # type: () -> int\n return self._video_bitrate", "def video_duration(self):\n # type: () -> int\n return self._video_duration", "def limit_fps(fps):\n global _last_update\n elapsed = time.time() - _last_update\n if elapsed < 1 / fps:\n time.sleep(1 / fps - elapsed)\n _last_update = time.time()", "def display_fps(self):\n caption = \"{} - FPS: {:.2f}\".format(CAPTION, self.clock.get_fps())\n pg.display.set_caption(caption)", "def display_fps(self):\n caption = \"{} - FPS: {:.2f}\".format(CAPTION, self.clock.get_fps())\n pg.display.set_caption(caption)", "def _live_title(self, fps):\n\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n return 'Live %d ms %.2f FPS' % (et, fps)", "def is_Long_Enough(video):\r\n fps = video.get(cv.CAP_PROP_FPS)\r\n numFrames = video.get(cv.CAP_PROP_FRAME_COUNT)\r\n\r\n if((numFrames / fps) >= __speed__):\r\n return True\r\n else:\r\n return False", "def bspb_frameCounter():\n curTime = int(pm.currentTime())\n maxTime = int(pm.playbackOptions(q=True, maxTime=True))\n return '{0} / {1}'.format(curTime, maxTime)", "def video_faster_activated(self):\n\n if self.playerType == VLC and self.playMode == VLC:\n\n if self.play_rate + self.play_rate_step <= 8:\n self.play_rate += self.play_rate_step\n self.mediaplayer.set_rate(self.play_rate)\n\n # second video together\n if self.simultaneousMedia:\n self.mediaplayer2.set_rate(self.play_rate)\n self.lbSpeed.setText('x{:.3f}'.format(self.play_rate))\n\n logging.info('play rate: {:.3f}'.format(self.play_rate))", "def realFrameNumber(self, callback=None):\n count = 0\n theoreticalFrameNumber = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))\n if theoreticalFrameNumber > 30000:\n return theoreticalFrameNumber\n while(True):\n # Capture frame-by-frame\n ret, frame = self.video.read()\n if not ret:\n break\n if callback != None:\n callback(0.1 + (count / theoreticalFrameNumber) * 0.75, \"Calculating the number of frame\")\n count += 1\n return count", "def find_aspect_fps(aspect, msg, msg_type, fps):\n if msg_type == \"SINFO\" and msg[1] == \"0\":\n if msg[2] == \"20\":\n # aspect comes wrapped in \"\" remove them\n aspect = msg[4].replace('\"', '').strip()\n elif msg[2] == \"21\":\n fps = msg[4].split()[0]\n fps = fps.replace('\"', '').strip()\n fps = float(fps)\n return aspect, fps", "def get_fps(vid_path, prefix):\n _, name = os.path.split(vid_path)\n i1 = name.find(prefix) + len(prefix)\n i2 = name[i1:].find('_')\n fps = int(name[i1:i1+i2])\n\n return fps", "def get_duration(self) -> int:\n return int( (self._frame_count / self._fps) * 1000 )", "def update_frame(self):\r\n while not self.stopped:\r\n if not self.grabbed or not self.cap.isOpened():\r\n self.stop()\r\n else:\r\n self.grabbed, self.frame = self.cap.read()\r\n try:\r\n if self.grabbed:\r\n #self.New_Frame_Time = time.time()\r\n #self.FPS = 1/(self.New_Frame_Time-self.Old_Frame_Time)\r\n #self.Old_Frame_Time = self.New_Frame_Time\r\n self.FrameCount += 1\r\n else:\r\n print(f'Grabbed status is: {self.grabbed}')\r\n #self.Old_Frame_Time = time.time()\r\n except ZeroDivisionError:\r\n print(\"Division by zero error when finding video feed fps\")\r\n self.FPS = 0\r\n self.Old_Frame_Time = time.time()", "def update_fps(self, fps):\n self.fps_history.append(fps)\n if len(self.fps_history) > FPS_AVERAGES:\n self.fps_history.pop(0)\n\n self.fps_estimate = 
np.mean(self.fps_history)\n return self.fps_estimate", "def resolution(self):\n assert self.has_video\n\n return self.__resolution", "def video(path):\n cap = cv2.VideoCapture(path)\n frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n return frame_count, cap", "def video_to_frames(video_filename,output_dir):\n cap = cv2.VideoCapture(video_filename)\n video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1\n vid_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n vid_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_fps = int(cap.get(cv2.CAP_PROP_FPS))\n print(\"vid_res=%d x %d, fps=%d\\n\" % (vid_width, vid_height,vid_fps))\n crop_width=int(vid_width/128)*128\n crop_height=int(vid_height/128)*128\n grab_step=int(vid_fps/2)\n if cap.isOpened() and video_length > 0:\n count = 0\n frame_id=0\n success, image = cap.read()\n while success and frame_id <= 9999:\n if count%grab_step==0:\n crop_img = image[0:crop_width, 0:crop_height]\n resized_img = cv2.resize(crop_img, (128, 128)) \n cv2.imwrite(output_dir+\"/frame%05d.jpg\" % frame_id, resized_img)\n frame_id+=1\n success, image = cap.read()\n count += 1\n return 0", "def plot_fps(img, seconds):\n print(\"{0}ms\".format(seconds*1000))\n img = cv2.putText(\n img,\n 'Frame time: {0}ms'.format(round(seconds*1000, 2)),\n (20, 50),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n (0, 0, 0),\n 1,\n cv2.LINE_AA,\n )\n return img", "def get_video_info(filename: str) -> Dict:\n cap = cv2.VideoCapture(filename)\n if cap.isOpened():\n fps = int(round(cap.get(cv2.CAP_PROP_FPS)))\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n duration = frame_count / fps\n cap.release()\n video_info = {\n 'fps': fps,\n 'width': width,\n 'height': height,\n 'frame_count': frame_count,\n 'duration': duration\n }\n return video_info\n else:\n raise ValueError(f'OpenCV failed to open {filename}.')", "def state(self) -> int | None:\n\n if self.coordinator.data:\n data = self.coordinator.data.get(self._cam_name, {}).get(\n f\"{self._fps_type}_fps\"\n )\n if data is not None:\n try:\n return round(float(data))\n except ValueError:\n pass\n return None", "def plot_video(video, fps=1):\n global vis\n opts = dict(fps=int(fps))\n win = vis.video(video, opts=opts)\n return win", "def unit_of_measurement(self) -> str:\n return FPS", "def unit_of_measurement(self) -> str:\n return FPS", "def frame_rate():\n def r(x):\n return 6E7/x\n\n def w(x):\n return int(6E7/x)\n return r, w", "def video_format(self):\n return self.__video_format", "def num_frames(self):\n return len(self.video)", "def run(input_video_file, output_video_file):\n print(\"Debut de la transformation du format de la video\")\n #récupération de la vidéo\n video = cv2.VideoCapture(input_video_file)\n #fps de la vidéo\n fps = video.get(cv2.CAP_PROP_FPS)\n #largeur des images de la vidéo\n width_video = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n #hauteur des images de la vidéo\n height_video = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n #nombre d'images dans la vidéo\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n #durée de la vidéo\n duration = frame_count/fps\n #nouvelle durée de la vidéo (on arrondi)\n new_duration = math.floor(duration)\n #nouveau fps de la vidéo\n new_fps = float(round(fps))\n #appliquer le nouveau fps\n video.set(cv2.CAP_PROP_FPS,new_fps)\n #appliquer la nouvelle durée\n print(new_duration)\n print(new_fps)\n print(new_duration*new_fps)\n new_frame_count = new_duration*new_fps\n 
video.set(cv2.CAP_PROP_FRAME_COUNT,new_duration*new_fps)\n #déffinition du format de la vidéo en sortie\n video_out = cv2.VideoWriter(output_video_file,0x7634706d,new_fps,(width_video,height_video),True)\n \n count = 0\n #ouverture de la vidéo\n while(video.isOpened()):\n #lecture image par image\n ret, frame = video.read()\n if ret==True:\n\n #ecriture de l'image dans la vidéo en sortie\n video_out.write(frame)\n count = count + 1\n \n if (count > (new_frame_count-1)):\n # Libérer la vidéo\n video.release()\n break\n else:\n break\n\n print(\"fin de la transformation\")\n #fermer les vidéos\n video.release()\n video_out.release()", "def get_video_aspect(video_filename):\n if not os.path.exists(video_filename):\n raise ValueError(\"%s does not exist\" % video_filename)\n \n probe = ffmpeg.probe(video_filename)\n assert len(probe['streams']) == 1\n width = probe['streams'][0]['width']\n height = probe['streams'][0]['height']\n \n return width, height", "def frame_rate(self):\n return self._frame_rate", "def changeFps(self):\n check = re.search(\"[1-9][0-9]*\", self.ui.t_fps.text())\n if check:\n num = check.group()\n fps = int(num)\n if fps > self.fps_limit:\n logging.warning(\n \"Too big number for fps. Falling back to {0} fps.\".format(\n self.fps_limit\n )\n )\n fps = self.fps_limit\n self.fps = fps\n self.ui.t_fps.setText(str(fps))\n else:\n logging.info(\"Wrong Input For Fps\")\n self.ui.t_fps.setText(\"30\")\n self.fps = 30", "def frame_size(self):\n size = None\n if self.is_video():\n width = self.__dict__['width']\n height = self.__dict__['height']\n if width and height:\n try:\n size = (int(width), int(height))\n except ValueError:\n raise FFProbeError(\"None integer size %s:%s\" % (width, height))\n\n return size", "def run(self):\n i = 0\n t = time.time()\n while True:\n i = i + 1\n ret, frame = self.stream.read()\n if (i == 20):\n self.fps = 20/(time.time() - t)\n t = time.time()\n i = 0\n #If a frame is None need to re-init it: \n # - close a stream;\n # - reopen it;\n # - read frame again\n if frame is None:\n self.stream.release()\n self.stream = cv2.VideoCapture(self.url)\n ret, frame = self.stream.read()\n text = time.strftime('%Y-%m-%d %H:%M:%S')\n if (self.fps > 0):\n text = text + ' FPS: ' + str(round(self.fps))\n self.frame = cv2.putText(frame, text, (10, int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)) - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)\n self.frameID = uuid.uuid4()", "def play_video_file(fname : str):\n cap = cv2.VideoCapture(fname)\n fps = cap.get(5)\n font = cv2.FONT_HERSHEY_SIMPLEX\n fontScale = 1\n fontColor = (0, 0, 0)\n lineType = 2\n\n myvideo = []\n while cap.isOpened():\n ret, frame = cap.read()\n\n if ret is True:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.putText(gray, 'Time: ' + str(round(cap.get(0) / 1000, 2)),\n (10, 30),\n font,\n fontScale,\n fontColor,\n lineType)\n cv2.putText(gray, 'Frame: ' + str(int(cap.get(1))),\n (10, 70),\n font,\n fontScale,\n fontColor,\n lineType)\n myvideo.append(gray)\n #cv2.imshow('frame', gray)\n #cv2.waitKey(10)\n #if cv2.waitKey(delay=2) & 0xFF == ord('q'):\n # break\n else:\n break\n\n cap.release()\n\n if fps < 60:\n for frame in myvideo:\n cv2.imshow('frame', frame)\n cv2.waitKey(10)\n else:\n for ind, frame in enumerate(myvideo):\n if ind % 3 == 0:\n cv2.imshow('frame', frame)\n cv2.waitKey(10)\n else:\n continue\n cv2.destroyAllWindows()", "def get_video_size(self):\n # Get original size of video stream\n caps = self.imagesink.sinkpad.get_current_caps()\n if caps is None:\n return 
None\n\n # Assume these are simple caps with a single struct.\n struct = caps.get_structure(0)\n return (struct.get_int('width')[1], struct.get_int('height')[1])", "def state(self) -> int | None:\n if self.coordinator.data:\n data = self.coordinator.data.get(\"detection_fps\")\n if data is not None:\n try:\n return round(float(data))\n except ValueError:\n pass\n return None", "def getTotalFramesVid(srcVideoPath):\n cap = cv2.VideoCapture(srcVideoPath)\n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return 0\n\n tot_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n cap.release()\n return tot_frames", "def ms_to_frames(time, fps):\n return int(round(float(time) / 1000 * fps))", "def get_score(fps: float, ssim: float,\n ssim_max: float = 30.,\n rate: float = 10.) -> float:\n fps = np.round(fps, 1)\n ssim = np.round(ssim, 3)\n return 0.1 * np.log(fps) / np.log(rate) + min(ssim, ssim_max)", "def output_statistics(self, run_time):\n fps = self.received_frames / run_time\n MBps_per_frame = self.full_frame_length() / 1000.0 / 1000.0\n print '\\nRun time: %.2f seconds' % (run_time,)\n print 'Received frames: ', self.received_frames\n print 'Avg. frame rate: %s fps' % (fps,)\n print 'Avg. Bit rate: %.2f MB/s' % (MBps_per_frame * fps,)", "def frame_size(self) -> tuple[int, int]:\n return (int(self.get(cv.CAP_PROP_FRAME_HEIGHT)),\n int(self.get(cv.CAP_PROP_FRAME_WIDTH)))", "def __videoThread(self):\n\n self.frameList = []\n\n fpsTimer = FpsTimer(self.fps)\n printf(\"Starting videoStream thread.\")\n while self.running:\n fpsTimer.wait()\n if not fpsTimer.ready(): continue\n if self.setCamera is not None: self.__setNewCamera(self.setCamera)\n if self.paused: continue\n if self.cap is None: continue\n\n\n # Get a new frame\n ret, newFrame = self.cap.read()\n\n if not ret: # If a frame was not successfully returned\n printf(\"ERROR: while reading frame from Cam. Setting camera again...\")\n self.__setNewCamera(self.cameraID)\n cv2.waitKey(1000)\n continue\n\n\n # Do frame related work\n with self.frameLock:\n self.frame = newFrame\n\n # Add a frame to the frameList that records the 5 latest frames for Vision uses\n self.frameList.insert(0, self.frame.copy())\n # print(\"len\", len(self.frameList), \"Curr frames: \", [id(frame) for frame in self.frameList])\n while len(self.frameList) > 10:\n del self.frameList[-1]\n\n # Keep track of new frames by counting them. (100 is an arbitrary number)\n if self.frameCount >= 100:\n self.frameCount = 0\n else:\n self.frameCount += 1\n\n\n # Run any work functions that must be run. Expect no results. 
Work should be run before filters.\n if len(self.workList) > 0:\n # print(\"Work: \", self.workList)\n with self.workLock:\n for workFunc in self.workList:\n workFunc(self.frame)\n\n\n\n # Run any filters that must be run, save the results in self.filterFrame\n if len(self.filterList) > 0:\n # print(\"Filters: \", self.filterList)\n with self.filterLock:\n filterFrame = self.getFrame()\n for filterFunc in self.filterList:\n filterFrame = filterFunc(filterFrame)\n\n # Draw FPS on the screen\n fps = str(int(round(fpsTimer.currentFPS, 0)))\n cv2.putText(filterFrame, fps, (10, 20), cv2.FONT_HERSHEY_PLAIN, 1.25, (255, 255, 255), 2)\n\n self.filterFrame = filterFrame\n\n\n else:\n self.filterFrame = self.frame\n\n printf(\"VideoStream Thread has ended\")", "def update_fps(self):\n self.fps.tick()\n\n\trange_str = \"\"\n gd = self.main_curve_dialog.curve.get_data()[1]\n\trange_str = \"Max: %s, Min: %s, Avg: %0.5s \" \\\n\t\t % (numpy.max(gd), numpy.min(gd), numpy.average(gd))\n\n\n fps_text = \"%s Update: %s FPS\" % (range_str, self.fps.rate())\n self.action_fps_display.setText(fps_text)", "def video_config():\n\tcap = cv2.VideoCapture(0)\n\twhile not cap.isOpened():\n\t cap = cv2.VideoCapture(0)\n\t cv2.waitKey(10)\n\t print \"Wait for the header\"\n\tpos_frame = cap.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)\n\t\n\treturn cap, pos_frame", "def get(self, flag: int):\n if flag == cv2.CAP_PROP_FRAME_COUNT:\n return self.length", "def play_video(self,\r\n video=None,\r\n window_name=None,\r\n mode=MANUAL_MODE,\r\n mouse_callback_func=None):\r\n if mouse_callback_func==None:\r\n mouse_callback_func = self.dummy_mouse_call_back_func\r\n zoom = 1\r\n if video is None:\r\n video = self.video_buffer\r\n if window_name==None:\r\n window_name = 'Video Player'\r\n cv2.namedWindow(window_name)\r\n cv2.setMouseCallback(window_name, mouse_callback_func)\r\n idx=0\r\n self.current_frame = video[idx]\r\n self.img = self.current_frame.copy()\r\n while True:\r\n cv2.imshow(window_name, self.img)\r\n k = cv2.waitKey(30) & 0xFF\r\n if mode==MANUAL_MODE:\r\n if k==ord('f'):\r\n idx = min(idx+1, len(video)-1)\r\n self.current_frame = cv2.resize(video[idx], None, fx=zoom, fy=zoom, \\\r\n interpolation=cv2.INTER_AREA)\r\n self.img = self.current_frame.copy()\r\n if k==ord('b'):\r\n idx = max(0, idx-1)\r\n self.current_frame = cv2.resize(video[idx], None, fx=zoom, fy=zoom, \\\r\n interpolation=cv2.INTER_AREA)\r\n self.img = self.current_frame.copy()\r\n if k==ord('i'):\r\n zoom *= 1.33\r\n self.current_frame = cv2.resize(video[idx], None, fx=zoom, fy=zoom, \\\r\n interpolation=cv2.INTER_AREA)\r\n self.img = self.current_frame.copy()\r\n if k==ord('o'):\r\n zoom *= 0.75\r\n self.current_frame = cv2.resize(video[idx], None, fx=zoom, fy=zoom, \\\r\n interpolation=cv2.INTER_AREA)\r\n self.img = self.current_frame.copy()\r\n\r\n if k==27:\r\n break\r\n cv2.destroyAllWindows()\r\n self.x /=zoom\r\n self.ix/=zoom\r\n self.y /=zoom\r\n self.iy/=zoom\r\n \r\n self.poly = (np.asarray(self.poly)/zoom).astype(int)", "def getVideoFrames(videoFilePath, startFrameNumber=-1, endFrameNumber=-1):\n frames=[]\n vidcap = cv2.VideoCapture(videoFilePath)\n fps=vidcap.get(cv2.CAP_PROP_FPS)\n totalFrame=vidcap.get(cv2.CAP_PROP_FRAME_COUNT)\n if startFrameNumber==-1:\n startFrameNumber = 0\n if endFrameNumber == -1:\n endFrameNumber = totalFrame - 1\n success,image = vidcap.read()\n count = 0\n success = True\n while success:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = color.rgb2yiq(image).astype(np.float32)\n \n if 
count<startFrameNumber:\n success,image = vidcap.read()\n count+=1\n continue\n elif count>=endFrameNumber:\n break\n else: \n frames.append(image)\n success,image = vidcap.read()\n count += 1\n frames=np.array(frames)\n \n return fps, frames", "def imageMode(modename,movielist=[1]):\n\n moviedeq = []\n i2 = 0 \n for filenumber in movielist:\n cap = cv2.VideoCapture(glob.glob(\"*_\" + str(filenumber) + \".avi\")[0])\n ret,frame = cap.read()\n storedFrame = grayBlur(frame)\n totalFrames = 0\n while(cap.isOpened()):\n ret,frame = cap.read()\n if ret == False:\n break\n currentFrame = grayBlur(frame)\n if totalFrames < 50:\n if totalFrames % 3 == 0:\n moviedeq.append(currentFrame)\n totalFrames += 1\n storedFrame = currentFrame \n i2 += 1\n testing = calc_mode(moviedeq, np.zeros([ydim,xdim]))\n cv2.imwrite(\"mode_\" + modename + \".png\", testing)\n cap.release()\n cv2.destroyAllWindows()", "def video_playback_width(self):\n # type: () -> int\n return self._video_playback_width", "def get_video_statistics(self):\n return self._video_statistics", "def main_func_video_camera(param_list: list = None) -> bool:\r\n # index of param\r\n # noinspection PyPep8Naming\r\n PORT_RAW_PICT = 0\r\n\r\n # check if param OK\r\n if len(param_list) != 1:\r\n log_error_to_console(\"GET FRAME VIDEO CAPTURE MAIN FUNCTION PARAM NOK\", str(len(param_list)))\r\n return False\r\n else:\r\n port_image = get_port_from_wave(name=param_list[PORT_RAW_PICT])\r\n\r\n try:\r\n # noinspection PyUnresolvedReferences\r\n success, port_image.arr[:] = global_var_handler.VIDEO.read()\r\n if success is True:\r\n port_image.set_valid()\r\n except BaseException as error:\r\n is_error()\r\n # noinspection PyUnresolvedReferences\r\n log_error_to_console('RAW PICTURE NOK TO READ: ' + str(global_var_handler.VIDEO.__str__()), str(error))\r\n port_image.set_invalid()\r\n pass\r\n\r\n # noinspection PyUnresolvedReferences\r\n log_to_file(str(global_var_handler.FRAME))\r\n # noinspection PyUnresolvedReferences\r\n log_to_file(global_var_handler.STR_L0_SIZE)\r\n\r\n return True", "def analyze_movie(\n video_path, aspect_ratio=0, palette_size=32, frames=-1, step=1, show_frames=False, show_last_frame=False, color_format='hex'\n):\n\n # Parse video frame-by-frame\n vidcap = cv2.VideoCapture(video_path)\n success, image = vidcap.read()\n pil_img = None\n count = 0\n while success and frames == -1 or count < frames:\n if count % step == 0:\n # Convert to PIL image\n img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n pil_img = Image.fromarray(img)\n\n # Crop frame to remove border\n if aspect_ratio != 0:\n width, height = pil_img.size\n left = 0\n right = width\n content_height = 1/aspect_ratio * width\n border = (height - content_height) * 0.5\n top = border\n bottom = border + content_height\n pil_img = pil_img.crop((left, top, right, bottom))\n\n # Get primary color\n main_color = get_primary_color(\n pil_img, palette_size, show_img=show_frames)\n\n if color_format == 'hex':\n main_color = rgbToHex(main_color)\n \n print(main_color)\n\n # Attempt to read next frame\n success, image = vidcap.read()\n count += 1\n\n if show_last_frame:\n pil_img.show()", "def get_session_videodata(videos):\n # Get first frame of first video for future processing and number of frames in each video\n videos_data = {'Frame rate': [], 'Number frames': []}\n for idx, videofile in enumerate(videos):\n cap = cv2.VideoCapture(videofile)\n videos_data['Frame rate'].append(cap.get(cv2.CAP_PROP_FPS))\n videos_data['Number frames'].append(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))\n 
videos_data['Cumu. Num Frames'] = np.cumsum(videos_data['Number frames'])\n return videos_data", "def video_slower_activated(self):\n\n if self.playerType == VLC and self.playMode == VLC:\n\n if self.play_rate - self.play_rate_step >= 0.1:\n self.play_rate -= self.play_rate_step\n self.mediaplayer.set_rate(self.play_rate)\n\n # second video together\n if self.simultaneousMedia:\n self.mediaplayer2.set_rate(self.play_rate)\n\n self.lbSpeed.setText('x{:.3f}'.format(self.play_rate))\n\n logging.info('play rate: {:.3f}'.format(self.play_rate))", "def video_window_height(self):\n # type: () -> int\n return self._video_window_height", "def __call__(self, video_path, per_frames = 1 , offset = None):\n \n cap = cv2.VideoCapture(video_path)\n \n if not cap.isOpened():\n raise Exception(\"Video file does not exist or is invalid\")\n\n \n if offset:\n cap.set(cv2.CAP_PROP_POS_MSEC, offset)\n \n \n info = []\n\n while cap.isOpened():\n ret, frame = cap.read()\n if ret:\n if cap.get(cv2.CAP_PROP_POS_FRAMES) % per_frames == 0:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n faces_info = self.detect_faces_from_image(frame,\n desired_width=224, desired_height=224) \n if faces_info:\n for element in faces_info:\n face_img = image.img_to_array(element[1])\n\n face_img = utils.preprocess_input(face_img, version=1)\n face_img = np.expand_dims(face_img, axis=0)\n\n features = self.vgg_feature_extractor.predict(face_img)\n label = self.gender_svm.predict(features)[0]\n decision_value = round(self.gender_svm.decision_function(features)[0], 3)\n\n bounding_box = element[0][0]\n detection_score = round(element[5], 3)\n bbox_length = bounding_box.bottom() - bounding_box.top()\n\n info.append([\n cap.get(cv2.CAP_PROP_POS_FRAMES), bounding_box, (bbox_length, bbox_length), label,\n decision_value, detection_score\n ])\n\n else:\n break\n cap.release()\n info = pd.DataFrame.from_records(info, columns = ['frame', 'bb', 'size','label', 'decision', 'conf'])\n return info", "def main_func_video(param_list: list = None) -> bool:\r\n # index of param\r\n # noinspection PyPep8Naming\r\n PORT_RAW_PICT = 0\r\n\r\n # check if param OK\r\n if len(param_list) != 1:\r\n log_error_to_console(\"GET FRAME VIDEO MAIN FUNCTION PARAM NOK\", str(len(param_list)))\r\n return False\r\n else:\r\n port_image = get_port_from_wave(name=param_list[PORT_RAW_PICT])\r\n\r\n try:\r\n # noinspection PyUnresolvedReferences\r\n success, port_image.arr[:] = global_var_handler.VIDEO.read()\r\n if success is True:\r\n port_image.set_valid()\r\n except BaseException as error:\r\n is_error()\r\n # noinspection PyUnresolvedReferences\r\n log_error_to_console('RAW PICTURE NOK TO READ: ' + str(global_var_handler.VIDEO.__str__()), str(error))\r\n port_image.set_invalid()\r\n pass\r\n\r\n # noinspection PyUnresolvedReferences\r\n log_to_file(str(global_var_handler.FRAME))\r\n # noinspection PyUnresolvedReferences\r\n log_to_file(global_var_handler.STR_L0_SIZE)\r\n\r\n return True", "def get_frame_rate(filename):\n cmd = ('ffprobe -v 0 -of flat=s=_ -select_streams v:0 -show_entries '\n 'stream=avg_frame_rate -of default=nokey=1:noprint_wrappers=1 ' +\n filename).split()\n pid = subprocess.run(cmd, stdout=subprocess.PIPE,\n universal_newlines=True)\n if pid.returncode != 0:\n return None\n\n frame_rate_exp = pid.stdout.rstrip()\n try:\n frame_rate = float(frame_rate_exp)\n except:\n if frame_rate_exp == 'N/A':\n frame_rate = 0.\n else:\n numerator, denominator = map(float, frame_rate_exp.split('/'))\n if denominator == 0:\n frame_rate = 0\n else:\n frame_rate = 
numerator / denominator\n return frame_rate", "def video_playback_height(self):\n # type: () -> int\n return self._video_playback_height", "def delta_time(self):\n delta_time = time.time() - self.time\n if delta_time >= 1.0 / self.target_fps:\n self.time = time.time()\n # end if\n return delta_time", "def get_fps_from_sketch(sketch_file_path):\n fps = -1\n # get fps\n with open(sketch_file_path) as json_file:\n data = json.load(json_file)\n fps = float(data[\"fps\"])\n return fps" ]
[ "0.7892058", "0.7523606", "0.7171496", "0.7074355", "0.705509", "0.7029802", "0.69614834", "0.69196635", "0.69127625", "0.6722967", "0.66790813", "0.66567814", "0.6546592", "0.6546592", "0.653519", "0.65346336", "0.6509575", "0.64402926", "0.6350182", "0.6329676", "0.6317738", "0.62529945", "0.62510175", "0.6250377", "0.61761034", "0.6153939", "0.60294247", "0.59857106", "0.598304", "0.5885418", "0.58526605", "0.58393604", "0.583303", "0.58278805", "0.57913435", "0.573843", "0.5712439", "0.5711182", "0.5697065", "0.56821495", "0.5674893", "0.5654549", "0.5619513", "0.5619513", "0.5589483", "0.55857915", "0.55855656", "0.55744743", "0.5569778", "0.5563023", "0.55345273", "0.5529424", "0.5509501", "0.5500785", "0.54916006", "0.54899216", "0.5481493", "0.54663074", "0.54612905", "0.54424495", "0.5422567", "0.54190063", "0.54190063", "0.5403701", "0.53682286", "0.5367354", "0.5345165", "0.53405964", "0.528491", "0.52829546", "0.5281796", "0.52667874", "0.52539223", "0.52336365", "0.52279603", "0.5227008", "0.52199453", "0.521204", "0.52072793", "0.52015424", "0.52014434", "0.5196263", "0.5189954", "0.5186503", "0.51581913", "0.51515967", "0.5147511", "0.5128472", "0.5125918", "0.51237655", "0.51166505", "0.5116083", "0.5075872", "0.50712776", "0.50707245", "0.5066059", "0.50632524", "0.50629133", "0.50622284", "0.50551665" ]
0.74950314
2
Creates an ordered puzzle of dimension lengthXlength with length > 1
def __init__(self, length):
        if (length <= 1 or length.__class__ != int):
            try:
                raise ValueError('length must be an integer greater than 1')
            except:
                print('something went wrong!')
                raise
        self.__length = length
        self.__possibilities = (True, False, True, False) # up, down, left, right
        self.__blank_box = (length-1, length-1)
        self.__board = [[i*length+j+1 for j in range(length)] for i in range(length)]
        self.__board[length-1][length-1] = None
        self.__sorted = deepcopy(self.__board)
        self.__previous_move = ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_board(size) -> list:\n return list(itertools.product([i for i in range(size)], repeat=2))", "def buildpuzzle(self):\r\n self.puzzle = copy.deepcopy(self.rows)\r\n if self.difficulty == 1:\r\n self.removedigits(1)\r\n if self.difficulty == 2:\r\n self.removedigits(2)\r\n if self.difficulty == 3:\r\n self.removedigits(3)", "def puzzle_02() -> None:\n\n containers = load_containers()\n combinations_lengths = tuple(map(\n lambda combination: len(combination),\n filter(lambda combination: sum(combination) == EGGNOG_LITRES,\n [combination\n for i in range(len(containers))\n for combination in combinations(containers, i)])))\n print_puzzle_solution(combinations_lengths.count(min(combinations_lengths)))", "def create_possibilities(runs: List[int], length: int) -> List[List[int]]:\n possibilities = []\n\n def _create_possibilities(runs, mark, min_start, possibility):\n #Base Case: add current possibility and return\n if len(runs) == 0:\n possibilities.append(possibility)\n return\n elif min_start >= len(possibility):\n return\n for start in range(min_start, len(possibility) - runs[0] +1 ):\n #Make a copy of the array for every position that the run can start in\n p_copy = copy.deepcopy(possibility)\n for i in range(start, start+runs[0]):\n if(i < len(possibility)):\n p_copy[i] = mark\n else:\n return\n _create_possibilities(runs[1:], mark+1, start + runs[0]+1, p_copy)\n\n empty_possibility = [0 for i in range(length)]\n _create_possibilities(runs, 1, 0, empty_possibility)\n\n return possibilities", "def puzzle_01() -> None:\n\n containers = load_containers()\n print_puzzle_solution(len(tuple(filter(\n lambda combination: sum(combination) == EGGNOG_LITRES,\n [combination\n for i in range(len(containers))\n for combination in combinations(containers, i)]))))", "def setPuzzle():\n matrix = tuple() # This will be a tuple of tuples to hold the original puzzle set\n\n matrix += ((0, 25, 0, 21, 0, 4, 0, 8, 0, 17, 0),)\n matrix += ((12, 22, 13, 8, 18, 8, 0, 18, 2, 13, 8),)\n matrix += ((0, 14, 0, 24, 0, 21, 0, 22, 0, 22, 0),)\n matrix += ((5, 13, 26, 20, 0, 16, 20, 9, 13, 7, 13),)\n matrix += ((0, 7, 0, 5, 0, 20, 0, 3, 0, 0, 9),)\n matrix += ((20, 16, 22, 0, 0, 0, 0, 0, 21, 17, 3),)\n matrix += ((17, 0, 0, 8, 0, 23, 0, 1, 0, 21, 0),)\n matrix += ((9, 21, 10, 11, 4, 20, 0, 10, 21, 3, 18),)\n matrix += ((0, 18, 0, 4, 0, 8, 0, 13, 0, 3, 0),)\n matrix += ((7, 22, 6, 21, 0, 18, 21, 25, 17, 20, 18),)\n matrix += ((0, 9, 0, 18, 0, 19, 0, 8, 0, 15, 0),)\n\n return matrix", "def create_grid(size):\n grid = []\n for i in range(size):\n row = ['0']*size\n grid.append(row)\n\n return grid", "def make_board(N):\n assert N >= 1, \"Invalid board dimension\";\n assert type(N) == int, \"N must be an integer\";\n return [[\"*\" for x in range(N)] for x in range(N)];", "def make_board(side_len):\n assert side_len > 0, 'Board size should be > 0.'\n return [[random.choice(string.ascii_lowercase) for _ in range(side_len)] for _ in range(side_len)]", "def make_sudoku(size):\r\n def mutate_list_1(lst, size):\r\n \"\"\"Helper function for removing part of a list from the beginning and add it to the end.\"\"\"\r\n count = 0\r\n while count < size:\r\n elem = lst[0]\r\n lst.remove(elem)\r\n lst.append(elem)\r\n count += 1\r\n return lst\r\n\r\n def mutate_list_2(lst):\r\n \"\"\"Helper function for removing element from the beginning of a list and add it to the end.\"\"\"\r\n elem = lst[0]\r\n lst.remove(elem)\r\n lst.append(elem)\r\n return lst\r\n\r\n count = 0\r\n matrix_length = size ** 2 # define a size of 
matrix\r\n matrix = [[] * matrix_length] # create an empty matrix\r\n matrix[0] = range(1, matrix_length + 1) # set a first row to a range from 1 to size ** 2\r\n while count < matrix_length - 1:\r\n l = matrix[count][:] # create a new list object that is a copy of previous row in a matrix\r\n if (count + 1) % size == 0: # check if a row in inner square of a matrix\r\n l = matrix[count - (size-1)][:] # if it is, l set to the first row of previous square\r\n matrix.append(mutate_list_2(l))\r\n else:\r\n matrix.append(mutate_list_1(l, size)) # mutate l and add it to the matrix\r\n count += 1\r\n\r\n\r\n return matrix", "def create_knapsack_packing_problems_with_manual_solutions(can_print=False):\n\n problems, solutions = list(), list()\n\n start_time = time.time()\n\n # Problem 11\n\n max_weight = 200.\n container_shape = Polygon([(0, 0), (0, 2330), (5867, 2330), (5867, 0)])\n container = Container(max_weight, container_shape)\n items = [Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n Item(MultiPolygon([(Point(5, 5).buffer(321, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(216, 4).exterior.coords)])]), 5., 190.),\n\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n Item(MultiPolygon([(Point(5, 5).buffer(402, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 4., 215.),\n\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(427, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(250, 4).exterior.coords)])]), 3., 235.),\n\n Item(MultiPolygon([(Point(5, 5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n Item(MultiPolygon([(Point(5, 5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n Item(MultiPolygon([(Point(5, 
5).buffer(487, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 2., 235.),\n\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.),\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.),\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.),\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.),\n Item(MultiPolygon([(Point(5, 5).buffer(562, 4).exterior.coords,\n [tuple(Point(5, 5).buffer(333, 4).exterior.coords)])]), 1., 315.)]\n problem = Problem(container, items)\n problems.append(problem)\n\n solution = Solution(problem)\n solutions.append(solution)\n\n print_if_allowed(solution.add_item(0, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(1, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(2, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(3, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(4, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(5, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(6, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(7, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(8, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(9, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(10, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(11, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(12, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(13, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(14, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(15, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(16, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(17, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(18, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(19, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(20, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(21, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(22, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(23, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(24, (5., 5.), 0.), can_print)\n print_if_allowed(solution.add_item(25, (5., 5.), 0.), can_print)\n\n # show elapsed time\n elapsed_time = get_time_since(start_time)\n print_if_allowed(\"Manual elapsed time: {} ms\".format(round(elapsed_time, 3)), can_print)\n\n return problems, [str(i + 1) for i in range(len(problems))], solutions", "def create_population(board_size, population_size):\n return [Nonogram(board_size) for x in range(0, population_size)]", "def setup(self, length):\n self.matrix = [None] * length\n for x in range(0,length):\n self.matrix[x] = [None] * length\n self.i = self.k = self.j = 0", "def make_board():\n return [[0 for i in range(8)] for i in range(8)]", "def create_board(board_size):\n board = []\n for i in range(board_size):\n row = []\n for j in range(board_size):\n row.append('-')\n board.append(row)\n return board", "def makeBoard(n):\n valid_positions = []\n for i in range(0, n):\n for j in range(0,n):\n valid_positions.append(Position(i,j))\n return valid_positions", 
"def generate_new_puzzle():\n new_puzzle = pb() \n\n # only generate solvable puzzles\n while not new_puzzle.is_solvable():\n new_puzzle = pb()\n\n return new_puzzle", "def _build_board(y_size, x_size, game_board):\n\n for y_coordinate in range(1, y_size + 1):\n for x_coordinate in range(1, x_size + 1):\n game_board[(x_coordinate, y_coordinate)] = {0: {}, 1: {}, 2: {}}", "def __generate_goal(width, length):\n goal = np.arange(1, ((width * length)+1)).reshape(length, width)\n goal[length - 1][width - 1] = 0\n return goal\n\n\n # This was the string builder method for the returned string.", "def generate_possible_slices(L, H):\n n_min = 2 * L\n n_max = H\n\n slices = []\n for he in range(1, n_max+1):\n for wi in range(max(1, n_min // he), n_max + 1):\n if he * wi > n_max:\n break\n slices.append((wi, he))\n\n return slices", "def gen_grid(grid_width, grid_height):\n\n grid = []\n for x in range(0, grid_width):\n grid.append([])\n for y in range(0, grid_height):\n grid[x].append(False)\n return grid", "def min_len_options(board):\n min_length = 9\n best_options = []\n for i in range(n):\n for j in range(n):\n cur_length = len(board[i][j])\n if cur_length < min_length and cur_length > 1:\n min_length = cur_length\n best_options = [(board[i][j], i, j)]\n elif cur_length == min_length:\n best_options += [(board[i][j], i, j)]\n return best_options", "def create_board(self, dimension):\n\n board = []\n\n for i in range(dimension):\n row = []\n for j in range(dimension):\n row.append(' ')\n board.append(row)\n\n return board", "def generate_3d_board(n):\n layer = nxn_generate.generate_shuffled_2d_board(n)\n cube = []\n for i in range(len(layer)):\n new_layer = []\n for column in layer:\n new_column = []\n # this nested mess is to ensure that none of the sub 3x3 squares violates sudoku rules from any x y or z\n # perspective (also the Latin Square rules but the subsquares are trickier and the cause of more mess)\n for j in range(int(math.sqrt(len(layer)))):\n for k in range(int(math.sqrt(len(layer)))):\n # lot of 3 = (i+j) % 3\n # index within lot = (i + k + (i//3)) % 3\n new_column.append(column[int(math.sqrt(len(layer))) * ((i + j) % int(math.sqrt(len(layer)))) + (\n i + k + (i // int(math.sqrt(len(layer))))) % int(math.sqrt(len(layer)))])\n new_layer.append(new_column)\n cube.append(new_layer)\n\n return shuffle_cube(cube)", "def get_puzzle(self):\n return [[str(self.puzzle[i][j]) for j in range(len(self.puzzle[0]))] for i in range(len(self.puzzle))]", "def test_create_tile_puzzle(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,6],[7,8,0]])\n p = hw.create_tile_puzzle(2, 4)\n self.assertEqual(p.get_board(), [[1,2,3,4],[5,6,7,0]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertEqual(p.get_board(), [[1,2,3,0]])", "def createUpPyramidSets(blocksize,operating):\n sets = tuple()\n ul = blocksize[0]-operating\n dl = operating\n while ul > dl:\n r = numpy.arange(dl,ul,1)\n sets+=(tuple(product(r,r)),)\n dl+=operating\n ul-=operating\n return sets", "def to_pppack_style(tau):\n # saving the list in the given order\n\n Ni = [len(t) for t in tau]\n aux = []\n # generating grid points with the required order for pppack\n for x, i in zip(itertools.product(*tau), itertools.product(*[range(nx) for nx in Ni])):\n aux.append([x, getidx(i, Ni)])\n grid = []\n for point in sorted([v for v in aux], key=lambda tup: (tup[1])):\n grid.append(point[0])\n return grid", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0]*4)\r\n return grid", "def __init__(self, 
length):\n self.length = length\n self.lst = []\n for x in range(length):\n for y in range(length):\n t = ((x+1, y+1), (x+1)*(y+1))\n self.lst.append(t)", "def create_empty_grid(width, height):\n return [[None] * width for _ in range(height)]", "def create_board(N):\n board = [[0 for x in range(N)] for y in range(N)] \n return board", "def arrangeQueens(N):\n arrangements = list()\n columns = [None] * N\n __arrangeNQueens__(arrangements, columns, 0, N)\n return arrangements", "def solve_puzzle(self):\n # replace with your code\n string = ''\n width = self._width\n height = self._height\n zero = self.current_position(0, 0)\n row_to_zero = height - 1 - zero[0]\n col_to_zero = width - 1 - zero[1]\n string += 'r' * col_to_zero\n string += 'd' * row_to_zero\n self.update_puzzle(string)\n if width == 2 and height == 2:\n string += self.solve_2x2()\n elif width > 2 and height == 2:\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n elif width == 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n string += self.solve_2x2()\n elif width > 2 and height > 2:\n for row in range(height - 1, 1, -1):\n for col in range(width - 1, 0, -1):\n string += self.solve_interior_tile(row, col)\n string += self.solve_col0_tile(row)\n #for row in range(height - 1, -1, -1):\n for col in range(width - 1, 1, -1):\n string += self.solve_row1_tile(col)\n string += self.solve_row0_tile(col)\n string += self.solve_2x2()\n return string", "def piece_factory(total_length, piece_length, hashes):\n num_pieces = (total_length // piece_length) + 1\n for i in range(num_pieces - 1):\n yield Piece(i, piece_length, hashes[i])\n yield Piece(num_pieces - 1, total_length % piece_length, hashes[num_pieces - 1])", "def __init__(self, initial, size, horizontalChunks, verticalChunks, goal = \"\"):\n\t\tself.initial = initial\n\t\tself.size = size\n\t\tself.horChunks = horizontalChunks\n\t\tself.verChunks = verticalChunks\n\n\t\t# Goal holds the solution, once we find it.\n\t\tself.goal = goal\n\n\t\t# For a puzzle of size n, initializes blank n x n 2d array\n\t\tself.graph = [[0 for x in range(self.size)] for x in range(self.size)] \n\t\tfor i in range (0,self.size):\n\t\t\tfor j in range (0,self.size):\n\t\t\t\tself.graph[i][j] = initial[i*self.size + j] \n\t\tself.initial = \"\"", "def new_board(n: int) -> Board:\n\n return [[0 for _ in range(n)] for _ in range(n)]", "def create_victory_conditions(size): #Written by Cody West. 
Not used in current program, could be used to make boards of different sizes\n victory_conditions = []\n for i in range(size):\n horizontal_victory = []\n for n in range(size):\n horizontal_victory.append(size*i+n)\n victory_conditions.append(horizontal_victory)\n for i in range(size):\n vertical_victory = []\n for n in range(size):\n vertical_victory.append(size*n+i)\n victory_conditions.append(vertical_victory)\n diagonal_victory_1 = []\n for i in range(size):\n diagonal_victory_1.append(size*i+i)\n victory_conditions.append(diagonal_victory_1)\n diagonal_victory_2 = []\n for i in range(size):\n diagonal_victory_2.append((i+1)*size-(i+1))\n victory_conditions.append(diagonal_victory_2)\n return(victory_conditions)", "def make_deck():\n deck = []\n for i in range(13):\n for j in range(13):\n if j >= i:\n deck.append([i, j])\n else:\n pass\n return deck", "def __init__(self, size):\n\t\tself.size = size\n\t\tself.board = []\n\t\tnew = []\n\t\tfor i in range(0, size, 1):\n\t\t\tfor j in range(0, size, 1):\n\t\t\t\tnew.append(0)\n\t\t\tself.board.append(new)\n\t\t\tnew = []", "def board_generate_empty(size: 'board size'):\n empty_board = [[None] * size for _ in range(size)]\n return empty_board", "def create_wild_lists(amount,length):\r\n box = []\r\n\r\n k = 0\r\n while k < amount:\r\n sublist = []\r\n j = 0\r\n while j < length:\r\n num = random.randint(1, 100)\r\n sublist.append(num)\r\n j += 1\r\n box.append(sublist)\r\n k += 1\r\n\r\n if amount == 1:\r\n return sublist\r\n\r\n return box", "def gen_permutations(outcomes, length):\r\n \r\n ans = set([()])\r\n for dummy_idx in range(length):\r\n temp = set()\r\n for seq in ans:\r\n for item in outcomes:\r\n new_seq = list(seq)\r\n if new_seq.count(item) == 0:\r\n new_seq.append(item)\r\n temp.add(tuple(new_seq))\r\n ans = temp\r\n return ans", "def grid_maker(width, height):\n grid = [['.' 
for i in range(width)] for j in range(height)]\n return grid", "def create_grid(grid):\r\n inner = [0]*4\r\n for i in range(4):\r\n grid.append(inner[:])", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "def generate_grid(height, width):\n return [[random.randint(0, 9) for _ in range(width)] for _ in range(height)]", "def generate_boards():\n\n print \"Generating data, please hold on...\"\n # a list for turns, each which is a list of boards, which are unique layouts\n # a completely blank layout is always the start of the game, counting for turn 0\n game = [[Board(' ' * 9, 1)]]\n\n # there are at most 9 turns in a game of tic tac toe\n for turnNum in range(1, 10):\n # list of layouts for the current turn\n turn = []\n upperLayouts = game[-1]\n\n if turnNum % 2 == 1: player = 'X'\n else: player = 'O'\n\n # every turns' unique layouts are numbered to seperate them more easily\n pattern = 1\n # goes through every layout from the previous turn\n for ul in upperLayouts:\n # game does not continue after a winning move, and using a won board is only possible after turn 5\n if turnNum <= 5 or not ul.check_win()[0]:\n # 9 positions on every board\n for pos in range(9):\n if ul[pos] == ' ':\n newLayout = Board(ul[0:pos] + player + ul[pos+1:])\n # if it is a unique layout\n unique = True\n # goes through every existing layout for this turn\n for item in turn:\n if newLayout.matches(item): \n unique = False\n # the upper layout leads to an existing layout\n ul.paths.append(item.pattern)\n break\n if unique:\n turn.append(Board(newLayout, pattern))\n # the current upper layout leads to the new layout\n ul.paths.append(pattern)\n pattern += 1\n else:\n # adds a zero for paths because a played character is taking up that space\n ul.paths.append(0)\n game.append(turn)\n return game", "def big_tiling():\n t = Tiling(\n obstructions=(\n GriddedPerm(Perm((0,)), ((0, 1),)),\n GriddedPerm(Perm((0,)), ((0, 2),)),\n GriddedPerm(Perm((0,)), ((0, 3),)),\n GriddedPerm(Perm((0,)), ((1, 2),)),\n GriddedPerm(Perm((0,)), ((1, 3),)),\n GriddedPerm(Perm((1, 0)), ((0, 0), (0, 0))),\n GriddedPerm(Perm((1, 0)), ((0, 0), (1, 0))),\n GriddedPerm(Perm((1, 0)), ((0, 0), (2, 0))),\n GriddedPerm(Perm((1, 0)), ((1, 0), (1, 0))),\n GriddedPerm(Perm((1, 0)), ((1, 0), (2, 0))),\n GriddedPerm(Perm((1, 0)), ((1, 1), (1, 0))),\n GriddedPerm(Perm((1, 0)), ((1, 1), (1, 1))),\n 
GriddedPerm(Perm((1, 0)), ((1, 1), (2, 0))),\n GriddedPerm(Perm((1, 0)), ((1, 1), (2, 1))),\n GriddedPerm(Perm((1, 0)), ((2, 0), (2, 0))),\n GriddedPerm(Perm((1, 0)), ((2, 1), (2, 0))),\n GriddedPerm(Perm((1, 0)), ((2, 1), (2, 1))),\n GriddedPerm(Perm((1, 0)), ((2, 2), (2, 0))),\n GriddedPerm(Perm((1, 0)), ((2, 2), (2, 1))),\n GriddedPerm(Perm((1, 0)), ((2, 2), (2, 2))),\n GriddedPerm(Perm((2, 1, 0)), ((2, 3), (2, 3), (2, 0))),\n GriddedPerm(Perm((2, 1, 0)), ((2, 3), (2, 3), (2, 1))),\n GriddedPerm(Perm((2, 1, 0)), ((2, 3), (2, 3), (2, 2))),\n GriddedPerm(Perm((2, 1, 0)), ((2, 3), (2, 3), (2, 3))),\n ),\n requirements=(),\n )\n return t", "def create_grid(self):\n return [[0] * self.width for _ in range(self.height)]", "def splatter(size):\n solution = 0\n while not solution:\n grid = np.random.randint(0, 2, size=(size, size,), dtype=bool)\n grid |= np.random.randint(0, 2, size=(size, size,), dtype=bool)\n grid &= np.random.randint(0, 2, size=(size, size,), dtype=bool)\n grid[0, 0:size] = grid[size - 1, 0:size] = True\n grid[0:size, 0] = grid[0:size, size - 1] = True\n grid[1, 1] = grid[size-2, size-2] = False\n solution = solvable(grid)\n yield grid", "def arrange_tiles(self, layer):\n\n # número de tiles en 'x'\n width = self.width\n arranged_tiles = layer.arranged_tiles\n\n row = -1\n\n # convierte una lista en un diccionario\n for col, tile in enumerate(layer.tiles):\n # calcula la ubicación en dos dimensiones (fila y columna) de cada tile,\n # los tiles originalmente están ordenados en línea\n col %= width\n if col == 0:\n row += 1\n\n # excluye los tiles con id 0,\n # id 0 representa un espacio vacío en el tilemap\n if tile != 0:\n arranged_tiles[(row, col)] = tile\n\n # libera la memoria ocupada por la lista de tiles\n layer.tiles = None", "def board(constraints):\n rows = len(constraints[0])\n columns = len(constraints[1])\n board = []\n for i in range(rows):\n board.append([Empty for k in range(columns)])\n return board", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def createBoard(width, height):\n A = []\n for row in range(height):\n A += [createOneRow(width)]\n return A", "def create_packing_problems_with_optimal_solution_values():\n\n problems, problem_names, optimal_values = list(), list(), list()\n\n # the capacity is set to infinite so that it never restricts placements; all items have value 1 so that the objective is to maximize the number of placed items\n max_weight = np.inf\n\n # Circles in circle; Wolfram Alpha query: \"pack 7 circles of radius 3.9 in a circle of radius 13\"; full link: https://www.wolframalpha.com/input/?i=pack+7+circles+of+radius+3.9+in+a+circle+of+radius+13\n container_shape = Circle((13, 13), 13)\n container = Container(max_weight, container_shape)\n item_num = 7\n items = [Item(Circle((0, 0), 3.9), 1., 1.)] * item_num\n problem = Problem(container, items)\n problems.append(problem)\n problem_names.append(\"Circles in circle\")\n optimal_values.append(item_num)\n\n # Triangles in circle; Wolfram Alpha query: \"pack 20 triangles of side 4 in a circle of radius 9.5\"; full link: https://www.wolframalpha.com/input/?i=pack+20+triangles+of+side+4+in+a+circle+of+radius+9.5\n container_shape = Circle((9.5, 9.5), 9.5)\n 
container = Container(max_weight, container_shape)\n item_num = 20\n items = [Item(shape_functions.create_equilateral_triangle((0, 0), 4), 1., 1.)] * item_num\n problem = Problem(container, items)\n problems.append(problem)\n problem_names.append(\"Triangles in circle\")\n optimal_values.append(item_num)\n\n # Squares in circle; Wolfram Alpha query: \"pack 12 squares of side 3 in a circle of radius 7.8\"; full link: https://www.wolframalpha.com/input/?i=pack+12+squares+of+side+3+in+a+circle+of+radius+7.8\n container_shape = Circle((7.8, 7.8), 7.8)\n container = Container(max_weight, container_shape)\n item_num = 12\n items = [Item(shape_functions.create_square((0, 0), 3), 1., 1.)] * item_num\n problem = Problem(container, items)\n problems.append(problem)\n problem_names.append(\"Squares in circle\")\n optimal_values.append(item_num)\n\n # Circles in triangle; Wolfram Alpha query: \"pack 10 circles of radius 3 in a triangle of side 33\"; full link: https://www.wolframalpha.com/input/?i=pack+10+circles+of+radius+3+in+a+triangle+of+side+33\n container_shape = shape_functions.create_equilateral_triangle((19, 9.5), 33)\n container = Container(max_weight, container_shape)\n item_num = 10\n items = [Item(Circle((0, 0), 3), 1., 1.)] * item_num\n problem = Problem(container, items)\n problems.append(problem)\n problem_names.append(\"Circles in triangle\")\n optimal_values.append(item_num)\n\n # Triangles in triangle; Wolfram Alpha query: \"pack 18 triangles of side 3.5 in a triangle of side 20\"; full link: https://www.wolframalpha.com/input/?i=pack+18+triangles+of+side+3.5+in+a+triangle+of+side+20\n container_shape = shape_functions.create_equilateral_triangle((12, 6), 20)\n container = Container(max_weight, container_shape)\n item_num = 18\n items = [Item(shape_functions.create_equilateral_triangle((0, 0), 3.5), 1., 1.)] * item_num\n problem = Problem(container, items)\n problems.append(problem)\n problem_names.append(\"Triangles in triangle\")\n optimal_values.append(item_num)\n\n # Squares in triangle; Wolfram Alpha query: \"pack 30 squares of side 7.5 in a triangle of side 80\"; full link: https://www.wolframalpha.com/input/?i=pack+24+squares+of+side+7.5+in+a+triangle+of+side+80\n container_shape = shape_functions.create_equilateral_triangle((49, 24.5), 80)\n container = Container(max_weight, container_shape)\n item_num = 30\n items = [Item(shape_functions.create_square((0, 0), 7.5), 1., 1.)] * item_num\n problem = Problem(container, items)\n problems.append(problem)\n problem_names.append(\"Squares in triangle\")\n optimal_values.append(item_num)\n\n # Circles in square; Wolfram Alpha query: \"pack 50 circles of radius 17 in a square of side 300\"; full link https://www.wolframalpha.com/input/?i=pack+50+circles+of+radius+17+in+a+square+of+side+300:\n container_shape = shape_functions.create_square((150, 150), 300)\n container = Container(max_weight, container_shape)\n item_num = 50\n items = [Item(Circle((0, 0), 17), 1., 1.)] * item_num\n problem = Problem(container, items)\n problems.append(problem)\n problem_names.append(\"Circles in square\")\n optimal_values.append(item_num)\n\n # Triangles in square; Wolfram Alpha query: \"pack 15 triangles of side 4 in a square of side 14\"; full link: https://www.wolframalpha.com/input/?i=pack+15+triangles+of+side+4+in+a+square+of+side+14\n container_shape = shape_functions.create_square((7, 7), 14)\n container = Container(max_weight, container_shape)\n item_num = 15\n items = [Item(shape_functions.create_equilateral_triangle((0, 0), 4), 1., 1.)] * 
item_num\n problem = Problem(container, items)\n problems.append(problem)\n problem_names.append(\"Triangles in square\")\n optimal_values.append(item_num)\n\n # Squares in square; Wolfram Alpha query: \"pack 100 squares of side 4 in a square of side 58\"; full link: https://www.wolframalpha.com/input/?i=pack+100+squares+of+side+4+in+a+square+of+side+58\n container_shape = shape_functions.create_square((22.5, 22.5), 58)\n container = Container(max_weight, container_shape)\n item_num = 100\n items = [Item(shape_functions.create_square((0, 0), 4), 1., 1.)] * item_num\n problem = Problem(container, items)\n problems.append(problem)\n problem_names.append(\"Squares in square\")\n optimal_values.append(item_num)\n\n return problems, problem_names, optimal_values", "def build(xaxis, yaxis, zaxis):\n matrix = []\n for floor in range(zaxis):\n roomnum = 1\n matrix.append([])\n for row in range(yaxis):\n matrix[floor].append([])\n for column in range(xaxis):\n matrix[floor][row].append(str(roomnum))\n roomnum += 1\n return matrix", "def __init__(self, puzzle_height, puzzle_width, initial_grid = None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def createDownPyramidSets(blocksize,operating):\n bsx = int(blocksize[0]/2)\n bsy = int(blocksize[1]/2)\n dl = int((bsy)-operating); #lower y\n ul = int((bsy)+operating); #upper y\n sets = tuple()\n while dl > 0:\n r = numpy.arange(dl,ul,1)\n sets+=(tuple(product(r,r)),)\n dl-=operating\n ul+=operating\n return sets", "def printPuzzle(self):\n for i in range(9):\n print(self.puzzle[0][i], end=\" \")\n for n in range(1, 9):\n print()\n for m in range(9):\n print(self.puzzle[n][m], end=\" \")\n print(\"\\n\")", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def createBoard(height, width):\r\n A = []\r\n for row in range(height):\r\n A += [createOneRow(width)] \r\n return A\r\n #return [createOneRow(width) for x in range(height)]\r", "def corner_combinations(zdim: int):\n return combinations(range(zdim), 2)", "def random_puzzle(N=17):\n values = dict((s, digits) for s in squares)\n for s in shuffled(squares):\n if not assign(values, s, random.choice(values[s])):\n break\n ds = [values[s] for s in squares if len(values[s]) == 1]\n if len(ds) >= N and len(set(ds)) >= 8:\n return ''.join(values[s] if len(values[s]) == 1 else '.' 
for s in squares)\n return random_puzzle(N) ## Give up and make a new puzzle", "def puzzle_generator():\r\n print(\"Generating puzzles...\")\r\n puzzle_container = []\r\n while len(puzzle_container) < 25:\r\n next_state_tuple = ()\r\n check_dict = {}\r\n \r\n initial_state_tuple = ([[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]], (0, 0))\r\n for i in range(20):\r\n state_container = next_possible_states([initial_state_tuple], check_dict, True)\r\n try:\r\n next_state_tuple = random.choice(state_container)\r\n initial_state_tuple = next_state_tuple\r\n except IndexError:\r\n if initial_state_tuple not in puzzle_container:\r\n puzzle_container.append(initial_state_tuple)\r\n break\r\n if initial_state_tuple not in puzzle_container:\r\n puzzle_container.append(initial_state_tuple)\r\n \r\n if len(puzzle_container) == 25:\r\n print(\"25 distinct puzzles are succesfully generated!\")\r\n return puzzle_container\r\n else:\r\n print(\"Puzzle generation failed!\")", "def Solution(self):\n self.solver.check()\n m = self.solver.model()\n answer = [[0] * self.width for i in range(self.height)]\n for y in range(self.height):\n for x in range(self.width):\n answer[y][x] = int(str(m[self.grid[(x, y)]]))\n return answer", "def make_game_grid(self):\n return numpy.array([[random.choice(string.ascii_uppercase) for breath in range(self.grid_size)] for depth in\n range(self.grid_size)])", "def make_complete_graph(num_vertices):\n V = num_vertices\n K = V * (V - 1) // 2\n grid = np.zeros([3, K], np.int32)\n k = 0\n for v2 in range(V):\n for v1 in range(v2):\n grid[:, k] = [k, v1, v2]\n k += 1\n return grid", "def __init__(self, size_of_puzzle, number_of_rows=2):\n # Define the puzzle properties\n self.puzzle_length = size_of_puzzle\n self.number_of_rows = number_of_rows\n self.puzzle_width = int(size_of_puzzle / number_of_rows)\n\n # Define the costs for each of the types of moves\n regular_move_cost = 1\n wrapping_move_cost = 2\n diagonal_move_cost = 3\n\n # These are all of the 'regular' moves\n self.cost_of_move_up = regular_move_cost\n self.cost_of_move_down = regular_move_cost\n self.cost_of_move_right = regular_move_cost\n self.cost_of_move_left = regular_move_cost\n\n # These are all of the 'wrapping' moves\n self.cost_of_wrap_move = wrapping_move_cost\n\n # These are all of the 'diagonal' moves\n self.cost_of_diagonal_adjacent = diagonal_move_cost\n self.cost_of_diagonal_across = diagonal_move_cost", "def create_pristine_board(size=100):\n board = defaultdict(dict)\n\n for i in xrange(1, size + 1):\n board[i] = {j: (j - i) for j in xrange(min(i + 1, size + 1), min(i + 7, size + 1))}\n\n return board", "def create_grid(grid):\r\n for i in range (4):\r\n grid.append ([])\r\n for j in range (4):\r\n grid[i].append (0)", "def test_Pad3D27():\n input_shape = (1, 1, 2, 2, 2)\n pad = 2\n mode = \"replicate\"\n res = [\n [\n [\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n [3, 3, 3, 4, 4, 4],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n 
],\n [\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [5, 5, 5, 6, 6, 6],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n [7, 7, 7, 8, 8, 8],\n ],\n ]\n ]\n ]\n\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.base(res=res, padding=pad, mode=mode, data_format=\"NCDHW\", data=data)", "def possible_motifs_by_length(length, base_set=\"ACGU\"):\n args = [base_set for i in xrange(length)]\n for permutation in itertools.product(*args):\n yield \"\".join(permutation)", "def twoDize(array, width):\n count = 0\n output = []\n temp = []\n while len(array) > 0:\n temp.append(array.pop())\n if len(temp) == width:\n output.append(temp)\n temp = []\n return output", "def print_puzzle(state):\r\n \r\n print('-----')\r\n for i in range(4):\r\n print('|', end=\"\")\r\n for j in range(3):\r\n if state[i][j] == 0:\r\n print(\" |\", end=\"\")\r\n else:\r\n print(\"\", state[i][j], \"|\", end=\"\")\r\n if i == 0:\r\n break\r\n print('\\n-------------')", "def make_permutation(partition):\r\n P = Permutation()\r\n c = 0\r\n for j in range(len(partition)):\r\n a = []\r\n for h in range(partition[j]):\r\n a.append(c)\r\n c = c + 1 \r\n if (c == 1):\r\n P1 = Permutation()\r\n c = 0\r\n else:\r\n P1 = Permutation([a])\r\n P = P*P1\r\n return P", "def __generate_goal_board(self):\n element = 1\n array = []\n\n for row in range(self._n):\n row_to_append = []\n for col in range(self._n):\n row_to_append.append(element)\n element += 1\n array.append(row_to_append)\n\n array[self._n - 1][self._n - 1] = 0\n self._solved_board = Board(array=array, space=[self._n - 1, self._n - 1])", "def gen_maze(dim, p):\n maze = []\n for i in range(dim):\n maze.append([])\n for j in range(dim):\n if(random.uniform(0, 1) < p):\n maze[i].append(1)\n else:\n maze[i].append(0)\n\n maze[0][0] = 0\n maze[dim - 1][dim - 1] = 0\n return maze", "def create_rand_grid(grid_size):\n\n return [[randint(0, 1) for x in range(0, grid_size)] for y in range(0, grid_size)]", "def create_grid(height, width):\n grid = []\n \n for r in range(height):\n row = [0] * width # a row containing width 0s\n grid += [row]\n\n return grid", "def generate_repeats(min_size, max_size):\n generated_repeats = []\n alphabet = ['A', 'C', 'G', 'T']\n expanded_set = set()\n repeat_set = set()\n for i in range(min_size, max_size+1):\n for combination in product(alphabet, repeat=i):\n repeat = ''.join(combination)\n repeat_revcomp = rev_comp(repeat)\n expanded = expand_repeat(repeat, max_size)\n if expanded in expanded_set:\n continue\n else:\n repeat_cycles = get_cycles(repeat)\n for cycle in repeat_cycles:\n strand = '+'\n string = expand_repeat(cycle, max_size)\n expanded_set.add(string)\n if cycle not in repeat_set:\n repeat_set.add(cycle)\n generated_repeats.append('\\t'.join([cycle, repeat, str(len(cycle)), strand]))\n if repeat_revcomp == repeat:\n continue\n repeat_cycles = get_cycles(repeat_revcomp)\n for cycle in repeat_cycles:\n strand = '-'\n string = expand_repeat(cycle, max_size)\n expanded_set.add(string)\n if cycle not in repeat_set:\n repeat_set.add(cycle)\n generated_repeats.append('\\t'.join([cycle, repeat, str(len(cycle)), strand]))\n return generated_repeats", "def new_game(n):\n matrix = []\n\n for i in range(n):\n matrix.append([0] * n)\n return matrix", "def read_puzzle(filename: str) -> list:\n arr_puz = []\n with open(filename, 'r') as file:\n try:\n puzzle = file.read()\n for char in puzzle:\n if char == '\\n' or char == ' ':\n continue\n arr_puz.append(char)\n lenght = len(arr_puz)\n arr_puz = list(map(int, arr_puz))\n canon = []\n 
m = 0\n n = int(sqrt(lenght))\n for i in range(int(sqrt(lenght))):\n canon.append(arr_puz[m:n])\n m = n\n n += int(sqrt(lenght))\n return canon\n\n except:\n raise TypeError(\"\\nProblems with file!\\n\"\n \"Your puzzle should be 3x3\\n\"\n \"Empty space must be '0'\\n\")", "def create_coords_medium(ph):\n # Min: 8, max 12\n for start_row in xrange(ph.pizza.shape[0]):\n for start_col in xrange(ph.pizza.shape[1]-2*ph.min_ing_per_slice+1):\n # First scenario\n for i in xrange(ph.min_ing_per_slice*2, ph.max_cells_per_slice+1):\n end_row = start_row + 1\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0]-1):\n for start_col in xrange(ph.pizza.shape[1]-3):\n # Second scenario\n for i in xrange(ph.min_ing_per_slice, ph.min_ing_per_slice+3):\n end_row = start_row + 2\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)\n\n for start_row in xrange(ph.pizza.shape[0] - 2):\n for start_col in xrange(ph.pizza.shape[1] - 2):\n # Third scenario\n for i in xrange(3, 5):\n end_row = start_row + 3\n end_col = start_col + i\n yield (start_row, start_col, end_row, end_col)\n yield (start_row, start_col, end_col, end_row)", "def get_start_grid(cols=4, rows=4):\n\tgrid = [[\"\"]*cols for i in range(rows)]\n\tfor i in range(2):\n\t\tempties = get_empty_cells(grid)\n\t\ty,x = random.choice(empties)\n\t\tgrid[y][x] = 2 if random.random() < prob_2 else 4\n\treturn grid", "def fill_gap_2_0(R):\n suitable_positions = [[], [], [], [], []] # stores all suitable positions for pieces respectively\n for row_idx in range(8):\n for col_idx in range(8):\n if single_pos_no_conflict_check((row_idx, col_idx), R): # iterate all vacant positions\n if single_move_feasible_q(pos0[0], (row_idx, col_idx)) \\\n and single_move_feasible_q((row_idx, col_idx), R[1][0]):\n suitable_positions[0].append((row_idx, col_idx))\n if single_move_feasible_k(pos0[1], (row_idx, col_idx)) \\\n and single_move_feasible_k((row_idx, col_idx), R[1][1]):\n suitable_positions[1].append((row_idx, col_idx))\n if single_move_feasible_r(pos0[2], (row_idx, col_idx)) \\\n and single_move_feasible_r((row_idx, col_idx), R[1][2]):\n suitable_positions[2].append((row_idx, col_idx))\n if single_move_feasible_n(pos0[3], (row_idx, col_idx)) \\\n and single_move_feasible_n((row_idx, col_idx), R[1][3]):\n suitable_positions[3].append((row_idx, col_idx))\n if single_move_feasible_b(pos0[4], (row_idx, col_idx)) \\\n and single_move_feasible_b((row_idx, col_idx), R[1][4]):\n suitable_positions[4].append((row_idx, col_idx))\n if [] in suitable_positions:\n return []\n\n result_list = []\n for pos1 in itertools.product(*suitable_positions): # iterate all combinations\n if len(set(pos1)) == 5: # eliminate those different pieces with the same position\n tmp = copy.deepcopy(R)\n tmp[0] = pos1\n result_list.append(tmp)\n return result_list", "def generatePiece(self):\n\n empty_tiles = []\n for y in range(BOARD_SIZE):\n for x in range(BOARD_SIZE):\n if self.grid[x][y].isEmpty():\n empty_tiles.append(self.grid[x][y])\n\n two_or_four = random.choice([2, 4])\n random.choice(empty_tiles).set(two_or_four)", "def createBoard(width, height):\r\n board = []\r\n for i in range(height):\r\n board = board+[createOneRow(width)]\r\n return board\r\n # or\r", "def generate_binary_space(L,order='revsort'):\n\n Ns=2**L\n sorts = {'sort': lambda x: x, 'revsort': lambda x: Ns-1-x}\n sorter = sorts[order]\n space 
= np.zeros((Ns, L)\n )\n for i in range(Ns):\n d=sorter(i)\n for j in range(L):\n d, r = divmod(d, 2)\n space[i, L - j - 1] = int(r)\n\n return space", "def init_maze(width: int, height: int) -> list[int]:\n return [0] * width * height", "def __init__(self):\r\n self.rows = [[0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9, [0]*9]\r\n self.block1 = []\r\n self.block5 = []\r\n self.block9 = []\r\n self.puzzle = []\r\n self.score = 0\r\n self.difficulty = 1 # By default Easy difficulty\r\n\r\n \"\"\" Creating blocks using random number generator\"\"\"\r\n while len(self.block1) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block1:\r\n self.block1.append(r)\r\n\r\n while len(self.block5) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block5:\r\n self.block5.append(r)\r\n\r\n while len(self.block9) < 9:\r\n r = random.randrange(1,10)\r\n if r not in self.block9:\r\n self.block9.append(r)\r\n x = 0\r\n for i in range(3):\r\n for j in range(3):\r\n self.rows[i][j] = self.block1[x]\r\n x = x+1\r\n x = 0\r\n for i in range(3, 6):\r\n for j in range(3, 6):\r\n self.rows[i][j] = self.block5[x]\r\n x = x+1\r\n x = 0\r\n for i in range(6,9):\r\n for j in range(6,9):\r\n self.rows[i][j] = self.block9[x]\r\n x = x+1\r\n \"\"\"Creating a valid solution\"\"\"\r\n self.createsolution(self.rows)", "def copy(self):\r\n board = []\r\n for row in self.board:\r\n board.append([x for x in row])\r\n return Puzzle(board)", "def generate_grid():\n field = []\n three_lst = []\n for three_let in range(0, 3):\n three_lst = []\n for i in range(0, 3):\n three_lst.append(chr(random.randint(97, 122)))\n field.append(three_lst)\n return field", "def solve_puzzle(self):\r\n moves_str = \"\"\r\n # move zero to the most botton right\r\n zero_row, zero_col = self.current_position(0, 0)\r\n downs = self._height - 1 - zero_row\r\n rights = self._width - 1 - zero_col\r\n for dummy_d in range(downs):\r\n moves_str += \"d\"\r\n for dummy_r in range(rights):\r\n moves_str += \"r\"\r\n self.update_puzzle(moves_str)\r\n # Solve the bottom m−2 rows of the puzzle \r\n # in a row by row manner from bottom to top. \r\n # Each individual row will be solved in a right to left order.\r\n if self._height > 2 and self._width > 2:\r\n for row in range(self._height - 1, 1, -1):\r\n for col in range(self._width - 1, 0, -1):\r\n assert self.lower_row_invariant(row, col)\r\n moves_str += self.solve_interior_tile(row, col)\r\n assert self.lower_row_invariant(row, col - 1)\r\n assert self.lower_row_invariant(row, 0)\r\n moves_str += self.solve_col0_tile(row)\r\n assert self.lower_row_invariant(row - 1, self._width - 1)\r\n # Solve the rightmost n−2 columns of the top two rows\r\n # in a right to left order). 
\r\n # Each column consists of two unsolved positions \r\n # and will be solved in a bottom to top order.\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n \r\n elif self._height <=2 and self._width > 2:\r\n for col in range(self._width - 1, 1, -1):\r\n assert self.row1_invariant(col)\r\n moves_str += self.solve_row1_tile(col)\r\n assert self.row0_invariant(col)\r\n moves_str += self.solve_row0_tile(col)\r\n assert self.row1_invariant(col - 1)\r\n # Solve the upper left 2×2 portion of the puzzle directly.\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n elif self._height <= 2 and self._width <= 2:\r\n assert self.row1_invariant(1)\r\n moves_str += self.solve_2x2()\r\n #elif self._height > 2 and self._width <= 2:\r\n \r\n print moves_str\r\n print self._grid\r\n return moves_str", "def __createTiles(self, length, width, height):\n\n rectangles = []\n centrePoints = []\n \n # Defines the dimensions required to fit all tiles\n totalHeight = length * height\n totalWidth = length * width\n \n # Go through all tiles\n y = length\n while y < totalHeight + length:\n\n x = length\n while x < totalWidth + length:\n # Creates a Rect object\n rectangle = pygame.Rect(x, y, length, length)\n rectangles.append(rectangle)\n\n # Calculates the tile's centre point.\n centrePoint = (math.floor(x + length/2), math.floor(y + length/2))\n centrePoints.append(centrePoint)\n\n x += length\n y += length\n\n return rectangles, centrePoints", "def create_board(rows, columns):\n res = [[0 for i in range(columns)] for j in range(rows)]\n return res", "def extensions(self):\n # list accumulator for all extensions\n lst = []\n\n for row in range(self.n):\n for column in range(self.m):\n if self.from_grid[row][column] == \"*\":\n if row - 1 in range(self.n):\n # make all tuples in grid lists, for object assignment\n new_grid = [list(x) for x in self.from_grid]\n new_grid[row - 1][column] = \"*\"\n new_grid[row][column] = self.from_grid[row - 1][column]\n # change back to tuples, for new from_grid\n t = ()\n for n in new_grid:\n t += (tuple(n),)\n lst.append(MNPuzzle(t, self.to_grid))\n\n if row + 1 in range(self.n):\n # make all tuples in grid lists, for object assignment\n new_grid = [list(x) for x in self.from_grid]\n new_grid[row + 1][column] = \"*\"\n new_grid[row][column] = self.from_grid[row + 1][column]\n # change back to tuples, for new from_grid\n t = ()\n for n in new_grid:\n t += (tuple(n),)\n lst.append(MNPuzzle(t, self.to_grid))\n\n if column - 1 in range(self.m):\n # make all tuples in grid lists, for object assignment\n new_grid = [list(x) for x in self.from_grid]\n new_grid[row][column - 1] = \"*\"\n new_grid[row][column] = self.from_grid[row][column - 1]\n # change back to tuples, for new from_grid\n t = ()\n for n in new_grid:\n t += (tuple(n),)\n lst.append(MNPuzzle(t, self.to_grid))\n\n if column + 1 in range(self.m):\n # make all tuples in grid lists, for object assignment\n new_grid = [list(x) for x in self.from_grid]\n new_grid[row][column + 1] = \"*\"\n new_grid[row][column] = self.from_grid[row][column + 1]\n # change back to tuples, for new from_grid\n t = ()\n for n in new_grid:\n t += (tuple(n),)\n lst.append(MNPuzzle(t, self.to_grid))\n return 
lst" ]
[ "0.64792323", "0.62767714", "0.61650395", "0.6140122", "0.6096842", "0.60461116", "0.58814245", "0.5815948", "0.5800065", "0.57949734", "0.5787429", "0.5744311", "0.57322896", "0.56909317", "0.5681817", "0.56767344", "0.5667736", "0.56485003", "0.5632847", "0.56253445", "0.5610726", "0.5588279", "0.55753785", "0.5562644", "0.55613655", "0.5537864", "0.55322677", "0.5530864", "0.55264354", "0.5518835", "0.55184", "0.549843", "0.5494124", "0.54937303", "0.5491817", "0.54771155", "0.5473019", "0.5467172", "0.54642785", "0.54579616", "0.54570705", "0.5455751", "0.5455348", "0.54549795", "0.5454477", "0.54510415", "0.54510415", "0.54510415", "0.54508865", "0.5448518", "0.54260945", "0.54212624", "0.53994364", "0.53930163", "0.53865147", "0.5381894", "0.5378502", "0.53759426", "0.5374547", "0.5365069", "0.5360616", "0.53549933", "0.5353118", "0.5353118", "0.5346759", "0.5344228", "0.5340504", "0.53243226", "0.5317745", "0.53046614", "0.52996504", "0.5289516", "0.5288351", "0.52876997", "0.52795017", "0.52772933", "0.5267488", "0.52626306", "0.5254141", "0.5254019", "0.5251132", "0.5229126", "0.522794", "0.5217186", "0.5216598", "0.5212097", "0.5210295", "0.52075464", "0.5204672", "0.5191177", "0.5183202", "0.51778644", "0.5170199", "0.51684564", "0.51679826", "0.5167748", "0.5166454", "0.516624", "0.51654303", "0.5165078" ]
0.5530513
28
Gets the item at the exact coordinate on the board
def __get_box(self, position): return self.__board[position//self.__length][position%self.__length]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, args):\n x, y = args\n xpos, ypos = self.move(x, y)\n return self.board[xpos][ypos]", "def get_item(self, items, y, x):\n if self.maze[y][x] != \" \" and self.maze[y][x] in items:\n return self.maze[y][x]\n else:\n return None", "def get_piece(self, row, column):\n return self._board[row][column]", "def get_piece(self, position):\n return self.board[position[0]][position[1]]", "def __getitem__(self, index):\n x, y = index\n if 0 <= x < self.width and 0 <= y < self.height:\n return self.cells[x + y * self.width]\n else:\n return None", "def get_cell(self, point):\n return self._grid[point.x][point.y]", "def __getitem__(self, key):\n\t\tif not self._is_valid_key(key):\n\t\t\traise KeyError\n\t\t\n\t\tx, y = self._index_from_key(key)\n\t\treturn self._board[x][y]", "def get_piece(self, index):\n return self.squares[index]", "def get(self, index):\n return self.board[index]", "def get(self, point):\n\t\treturn self._grid.get(point)", "def getCellFromPosition(self, xPos, yPos):\n for cell in self.cells:\n if(xPos == cell.x and yPos == cell.y):\n return cell\n return False", "def get_piece(self, square):\n return self.board[square.row][square.col]", "def get_piece(self, square):\n return self.board[square.row][square.col]", "def get_tile(self, row, col):\n # replace with your code\n return self.board[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._grid[row][col]", "def get(self,row,col):\r\n return self.puzzle[row][col]", "def get_piece(self, x, y):\n if self.in_bounds(x, y) and self.piece_at(x, y):\n return self.__board[(x, y)]\n return None", "def get_tile(self, row, col):\n # replace with your code\n return self.grid[row][col]", "def getObject(self, row, column, gameGrid=None):\n if not gameGrid:\n gameGrid = self.gameGrid\n return gameGrid.getItem(row, column)", "def get_tile(self, row, col):\n return self.grid[row][col]", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self.grid[row][col]", "def get_tile(self, row, col):\n #print 'The value of tile at position: (',row,',',col,') is: ',self.grid[row][col]\n return self.grid[row][col]", "def query(self, cell: Tuple[int, int]):\n return self._board[cell[0]][cell[1]]", "def get(self, layer, row, column):\n if layer < 0 or row < 0 or column < 0:\n raise game.InvalidMoveException('The position ({}) is outside of the board'.format([layer, row, column]))\n try:\n return self._state['visible']['board'][layer][row][column]\n except:\n raise game.InvalidMoveException('The position ({}) is outside of the board'.format([layer, row, column]))", "def getItem(self, column, position):\n return self.data[column, position]", "def get_cell(self, x, y):\n if y < 0 or y >= len(self.g): return None\n if x < 0 or x >= len(self.g[y]): return None\n return self.g[y][x]", "def __getitem__(self,pt):\n return self.maze[pt.y][pt.x]", "def return_cell(self):\n\n pos = pygame.mouse.get_pos()\n\n x = pos[1] // (self.cell_size+1)\n y = pos[0] // (self.cell_size+1)\n\n return self.grid[x][y]", "def piece_at(self, row, col):\n return self.board[row + PADDING][col + PADDING]", "def __getitem__(self, index):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # ask my tile do the rest\n value = self.data[self.tile.offset(index)]\n # otherwise\n else:\n # 
retrieve the item directly from my container\n value = self.data[index]\n # all done\n return value", "def get_tile(self, row, col):\r\n return self._grid[row][col]", "def get_tile(self, row, col):\r\n value = self.board[row][col]\r\n return value", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self._cells[row][col]", "def get_tile(self, row, col):\r\n\r\n return self._board[row][col]", "def get_tile(self, row, col):\n # replace with your code\n return self._cells[row][col]", "def cell_at(self, x, y):\n\n return self.maze_map[x][y]", "def get_tile(self, row, col):\r\n # replace with your code\r\n return self._grid_tile[row][col]", "def get_game_piece_object_at_position(self, position):\n\n column, row = self.transpose_position(position)\n\n return self.get_board()[int(row)][int(column)]", "def get_tile(self, position):\n return self.tiles[position[x]][position[y]]", "def get_tile(self, row, col):\n return self._grid[row][col]", "def __getitem__(self, k):\n return self._board[k]", "def search(self, x, y):\n return self.chessboard[y][x]", "def getCell(self, row, column):\n\t\t\t\t\n\t\t\t\tif ((row is None) or (column is None)):\n\t\t\t\t\traise NotImplementedError()\n\n\t\t\t\treturn self.thing[self.convertColumn(row = row, column = column)]", "def __getitem__(self, item):\n return self.row[item]", "def get_position(self, row, column):\n position_key = \"{}{}\".format(row, column)\n return self.positions[position_key]", "def get_map_item(self, idx, col=0, absolute=False):\n\n return self.itemDataMap[self.itemIndexMap[idx] if not absolute else idx][self.get_real_col(col)]", "def cell(self, pos):\n\t\tpos = Point(pos)\n\t\tif not self.valid(pos):\n\t\t\traise KeyError('Invalid cell position: {0}'.format(pos))\n\t\treturn self.data[pos.x + pos.y * self.dims.width]", "def get_tile(self, row, col):\n tile_index = (row - 1) * self.num_col_tiles + (col - 1)\n tile = self.tiles[tile_index]\n return tile", "def __getitem__(self, index):\n return self.position[index]", "def getItemAtClick(self, event):\n pos = event.pos()\n obj = self.itemAt(pos)\n return obj", "def getItemAtClick(self, event):\n pos = event.pos()\n obj = self.itemAt(pos)\n return obj", "def __getitem__(self, item):\n return self.top[item]", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def get_item(array, index):\n row, column = index\n return array[row][column]", "def get_tile(self, row, col):\r\n \r\n return self._cells[row][col]", "def getTile(self, position):\n columns = \"ABCDEFGH\"\n if not position[0] in columns or not position[1].isdigit():\n raise invalidPlacementError\n return self.board[columns.find(position[0])][int(position[1]) - 1]", "def __getitem__(self, key: Position) -> Tile:\n (x, y) = key\n if 0 <= x < self.width or 0 <= y < self.height:\n raise KeyError\n return self._tiles[x + y * self.width]", "def get_tile(self, row, col):\n return self._cells[row][col]", "def find_value(self, x, y):\n for cell in self.cells:\n if cell.coordinates == (x,y):\n return cell.value\n else:\n return None", "def getItem(self, i, j):\n if i < 0:\n raise IndexError('Row index must be nonnegative.')\n if j < 0:\n raise IndexError('Column index must be nonnegative.')\n\n return self.__m[i - 1][j - 1]", "def get_cell(self, row, column):\n return self.sudoku_matrix[row][column]", "def _get(self, (y, x)):\n return self[y][x]", "def get_tile(self, point):\n print \"Getting tile for %s\" % repr(point)\n return self.matrix[point.y][point.x]", "def get_cell(self, x, y):\n x1, y1 = 
self.transpose_coordinates(x, y)\n if self.is_in_field(x1, y1):\n return self._cells[y1][x1]\n return None", "def room_at(self, x, y):\r\n return self.__maze[x][y]", "def cell_from_xy(self,x,y):\n return self.cell_array.item((x,y))", "def __getitem__(self, pos):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._getitem_1d(pos)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._getitem_2d(pos)\n else:\n return None", "def __getitem__(self, pos):\n if (self.master.__class__.__name__ == 'OneDimGrid') or (issubclass(self.master.__class__, OneDimGrid)):\n return self._getitem_1d(pos)\n if (self.master.__class__.__name__ == 'Grid') or (issubclass(self.master.__class__, Grid)):\n return self._getitem_2d(pos)\n else:\n return None", "def GetPosition(board):\n\tfor i in range(len(board.matrix)):\n\t\tfor j in range(len(board.matrix[i])):\n\t\t\tif board.matrix[i][j]==\"X\":\n\t\t\t\treturn i,j", "def get_cell_by_coords(self, coords):\n try:\n cell = GameCell.objects.get(row=coords[0], col=coords[1], game=self)\n return cell\n except GameCell.DoesNotExist:\n return None", "def get(self, x, y):\n i = self.map[y][x]\n return self.get(i)", "def get_data(self, *pos):\n r, c = pos\n return self._grid[r][c]", "def retrievematrixelement(self, coord):\n currentelement = self.matrix\n for u in coord:\n currentelement = currentelement[u]\n\n return currentelement", "def getItem(self,row,column,default=None):\n data = self.data\n if row in data and column in data[row]:\n return data[row][column]\n else:\n return default", "def get_square(self, row, col):\n\n return self.board[row][col]", "def __call__(self, pos):\n return self.__getitem__(pos)", "def __call__(self, pos):\n return self.__getitem__(pos)", "def __getitem__(self, item):\n return self.cube[item]", "def getpiece(self, x, y, z):\n\n for p in self.pieces:\n if (p.x, p.y, p.z) == (x, y, z):\n return p", "def get_row(board, position):\n row = (position-1) // 3 * 3\n return board[row:row+3]", "def peek(self, pos_x, pos_y):\n self.validate_position(pos_x, pos_y)\n return self.map[pos_x][pos_y]", "def __getitem__(self, j):\n\t\treturn self._coords[j]", "def __getitem__(self, item):\n return self.elements[item]", "def get_cell_value(self, index):\n x, y = index\n return self.grid[y][x]", "def get_tile(self, row, col):\n # replace with your code\n if row < self._grid_height and col < self._grid_width:\n return self._grid_2048[row][col]", "def get_item(self, index: int) -> _T:\n return self.index_to_item[index]", "def get_game_cell(self, row, col):\n try:\n return GameCell.objects.get(game=self, row=row, col=col)\n except GameCell.DoesNotExist:\n return None", "def get(self, position):\n return self.numbers[position[0]][position[1]]", "def get_cell(self, location):\n if 0 <= location[0] < self.boardSize and 0 <= location[1] < self.boardSize:\n return self.board[location[0]][location[1]]\n else:\n raise Exception(\"There is no cell at the given location\")", "def __getitem__(self, index):\n if index == 0:\n return self.x\n elif index == 1:\n return self.y\n raise IndexError", "def get_tile(self, row, col):\n if row >= 0 and row < self.get_grid_height():\n if col >= 0 and col < self.get_grid_width():\n # only return if the row and column are ok\n return self._grid[row][col]", "def __getitem__(self, pos):\n row, column = pos\n if row <= self.n_rows-1 and column <= self.n_columns-1:\n return self.bits[row][column]\n else:\n return 
False", "def __getElementFromPairs(self, point):\n return self.maze[point[0]][point[1]]", "def __getitem__ (self, idx):\n return self.row(idx[0])[idx[1]]", "def get_node(self, x: int, y: int) -> MazeCell:\n node = self._nodes_by_row[y][x]\n assert node.x == x and node.y == y, f\"(node.x, node.y) == ({node.x}, {node.y}), but should be ({x}, {y})\"\n\n return node", "def get_piece_at_opening(self, x, y):\n self._validate_opening(x, y)\n return self._openings[x][y]", "def __getitem__(self, index):\n if isinstance(index, (tuple, list)) and len(index) == 2:\n return self.cells[index[1]][index[0]]\n return self.cells[index]", "def get_cell(self, index, column):\n idx = self.index_location(index)\n col = self.column_location(column)\n\n return self._data[idx][col]", "def get_position(self, position):" ]
[ "0.7532374", "0.73557675", "0.7260173", "0.72259563", "0.7206686", "0.7199228", "0.7171327", "0.7140547", "0.7119871", "0.71094227", "0.7099475", "0.7034289", "0.7034289", "0.7010607", "0.7004049", "0.7004049", "0.7004049", "0.69689083", "0.6966279", "0.6965098", "0.69650495", "0.6943758", "0.6938741", "0.6934945", "0.6928891", "0.6923406", "0.6917967", "0.69175863", "0.6916771", "0.69167155", "0.6908426", "0.6903486", "0.68991745", "0.68982047", "0.68884146", "0.68835974", "0.6882165", "0.68644035", "0.6847683", "0.68445915", "0.6830409", "0.68173105", "0.68077505", "0.68040204", "0.6754467", "0.67478323", "0.6746317", "0.67340237", "0.6733781", "0.6733692", "0.6731568", "0.67241246", "0.67241246", "0.67164654", "0.6708173", "0.6689318", "0.6669726", "0.66555816", "0.66547453", "0.66532475", "0.6636181", "0.6633187", "0.6619922", "0.66195655", "0.657748", "0.65750766", "0.65708756", "0.654432", "0.65440375", "0.65440375", "0.6523763", "0.6523027", "0.6518451", "0.6515415", "0.65083957", "0.6506143", "0.64931035", "0.6490694", "0.6490694", "0.6489903", "0.6480862", "0.645539", "0.64472175", "0.6446454", "0.6431089", "0.6423387", "0.64230597", "0.64162016", "0.64057493", "0.64045745", "0.63885736", "0.63867074", "0.63808835", "0.6370301", "0.63701755", "0.6362756", "0.63593936", "0.6314317", "0.6311233", "0.63054305", "0.62919796" ]
0.0
-1
Changes the position of the blank box
def swap(self, direction): directions = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1),} new_row = self.__blank_box[0] + directions[direction][0] new_col = self.__blank_box[1] + directions[direction][1] new_position = self.__get_box((new_row*self.__length)+new_col) self.__board[self.__blank_box[0]][self.__blank_box[1]] \ = new_position self.__board[new_row][new_col] = None self.__blank_box = (new_row, new_col) self.__set_possibilities() self.__previous_move = direction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_position(self):\n self.rect.left, self.rect.top = self.start_pos", "def setBlank(self, pos):\n self.tiles[-1] = pos", "def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal", "def positioning(self):\n pass", "def reset(self):\n self.x_pos = 10\n self.y_pos = 10\n self.line_height = 15", "def box(self) -> None:\n self.screen.box()", "def set_postition(self, position):\n self._border.set_position(position)", "def reset_position(self):\n self.goto(STARTING_POSITION)", "def set_position(self, x, y):\n self.geometry('%s%s' % (x, y))", "def hide(self):\n self.geometry(\"%dx%d%+d%+d\" % (0, 0, 0, 0))", "def set_position(self, x, y):\n self.pos = pygame.Rect(x, y, 0, 0)", "def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8", "def define_box_location(self):\n self.contents['Box_ID'] = np.ones(self.numatom) * self.num_box", "def insert_empty_space(frame, row, column):\n empty_label = Label(frame, text=\"\")\n empty_label.grid(row=row, column=column)", "def box(self, x, y, w, h):\n\t\tpass", "def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y", "def fillbox(self,event=None):\n \n pass", "def reset_pos(self):\n self.rect.y = random.randrange(-300, -20)\n self.rect.x = random.randrange(0, SCREEN_WIDTH)", "def _positionWindow(self):\n\t\tscreen = QtGui.QDesktopWidget().screenGeometry()\n\t\tself.setGeometry(1050, 275, 375, 350)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def fill_box(self, x, y, w, h):\n\t\tpass", "def set_box(self, box=0.0):\n self.box = box", "def _place_elements(self, dt):\n self.root.size = Window.size\n center = Window.center\n self.rect.pos = center[0] + 100, center[1] + 100\n self.circle.pos = center[0] - 100, center[1] - 100", "def reset_pos(self):\n self.rect.y = random.randrange(-1000, -10)\n self.rect.x = random.randrange(0, WIDTH)", "def _positionWindow(self):\n\t\tif sys.platform=='win32':\n\t\t\tself.setGeometry(1050, 30, 375, 220)\n\t\telse:\n\t\t\tself.setGeometry(1050, 0, 375, 220)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def reset(self):\n if self.num == 1:\n self.rect.centerx = 320\n elif self.num == 2:\n self.rect.centerx = 341\n elif self.num == 3:\n self.rect.centerx = 362\n elif self.num == 4:\n self.rect.centerx = 383\n self.rect.centery = 371\n self.centerx = self.rect.centerx\n self.centery = self.rect.centery\n\n self.moving_right = False\n self.moving_left = False\n self.moving_up = True\n self.moving_down = False", "def set_position( self ):\n\t\tscreen_rect = self.get_preview_window_screen_rect( )\n\n\t\twhile screen_rect.Intersects( self.GetScreenRect( ) ):\n\t\t\tpos = self.GetPosition( )\n\t\t\tself.SetPosition( ( pos[ 0 ] - 2, pos[ 1 ] + 2 ) )", "def init_position_electrodes_screen(self):\n self.line_shoulder_pos_l.hide()\n self.line_shoulder_pos_r.hide()\n self.txt_shoulder_pos_r.hide()\n self.txt_shoulder_pos_r.hide()", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def setPosition(position):", "def change_loc_coords(self, field_size):\r\n self.top_left_corner = 
_get_center_writing(self.button) # sets new center\r\n font_size = int(field_size * 2) # resizes font\r\n self.font = pygame.font.SysFont(None, font_size) # updates font\r", "def __moveTo(self, x, y):\n newbox = (x, y, self.currentBox[2], self.currentBox[3])\n self.__drawAndErase(boxToDraw=newbox, boxToErase=self.currentBox)\n self.currentBox = newbox", "def fillbox(self,forceUpdate=False):\n \n # Only fill the box if the dialog is visible.\n # This is an important protection against bad performance.\n if not forceUpdate and self.top.state() != \"normal\":\n return\n \n self.box.delete(0,\"end\")\n c = self.c ; i = 0\n self.positionList = [] ; tnodeList = []\n for p in c.visitedList:\n if p.exists(c) and p.v.t not in tnodeList:\n self.box.insert(i,p.headString().strip())\n tnodeList.append(p.v.t)\n self.positionList.append(p.copy())\n i += 1", "def give_space(self):\n for child in self.winfo_children():\n child.grid_configure(padx=5, pady=5)", "def _go_to_first_blank(self, box):\n for y in range(4, 41):\n self.box2.move(y, 4)\n if chr(self.box2.inch() & 0xFF) == ' ': # binary and because char is in bottom 8 bits.\n self.box2.move(y + 1, 4)\n if chr(self.box2.inch() & 0xFF) == ' ': # look for 2 blank lines in a raw\n y, x = self.box2.getyx() # store position, we can add new alert below\n try:\n if chr(self.box2.inch(y + 1, x) & 0xFF) == ' ':\n return y\n except curses.error: # this is the case where we had 2 blank lines but these were the last 2 lines\n pass # of the screen. We can't /n + write two lines so we need to proceed to screen scrolling\n # screen is filled with alerts\n self.box2.scrollok(True)\n self.box2.move(4, 4)\n self.box2.setscrreg(4, 41)\n self.box2.scroll(4)\n self.box2.box() # need to redraw borders\n return self._go_to_first_blank(self.box2)", "def hide(self):\r\n self.rect.center = (WINDOWWIDTH/2, WINDOWHEIGHT -2000)", "def middlemakevisible(self, pos):\n pass", "def keepInBounds(self):\n screenWidth, screenHeight = self.screen.get_size()\n\n self.pos.x = max(0, self.pos.x)\n self.pos.x = min(screenWidth, self.pos.x)\n\n self.pos.y = max(0, self.pos.y)\n self.pos.y = min(screenHeight, self.pos.y)", "def display(self):\n stroke(51)\n fill(self.couleur)\n rect(self.pos_x, 0, self.largeur, self.min_y)\n rect(self.pos_x, self.min_y + self.hauteur, self.largeur, util.SCREEN_Y-(self.min_y + self.hauteur))", "def assign_position(self, position):\n\n self._actual_position = list(position)\n self._position = [0, 0]\n if not self._widget_assigned:\n self._align()\n self._update_widgets()\n self._widget_assigned = True", "def set_position(self, x, y):\n self.tx = -x\n self.ty = -y", "def fix_ui(self):\n x0, y0, x1, y1 = win32gui.GetWindowRect(self._handle)\n w = x1 - x0\n h = y1 - y0\n win32gui.MoveWindow(self._handle, x0, y0, w + 1, h + 1, True)", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def reset_pos(self):\r\n self.rect.x = random.randrange(50, 640)\r\n self.rect.y = random.randrange(-300, -80)", "def __init__(self, master, x, y, size):\n self.master = master\n self.abs = x\n self.ord = y\n self.size= size\n self.fill= Cell.EMPTY_COLOR_BG\n self.active=False", "def __set_paddle_position(self):\n self.__window.remove(self.__paddle)\n self.__window.add(self.__paddle, (self.__window.width - self.__paddle.width) / 2,\n self.__window.height - self.__paddle_offset)", "def ensureDisplayed(frame,x=100,y=100):\r\n if wx.Display.GetFromWindow(frame) == -1:\r\n topLeft = wx.Display(0).GetGeometry().GetTopLeft()\r\n 
frame.MoveXY(topLeft.x+x,topLeft.y+y)", "def position(self):\r\n pass", "def update_minimap_position(self):\n\t\tbtn = self.get_active_pane().get_minimap_btn()\n\t\tif btn is not None:\n\t\t\tself.window.minimap.set_relative_to(btn)\n\t\telse:\n\t\t\tself.window.minimap.set_relative_to(self.window.bottom_panes_box)", "def Position(self, pos):\r\n\r\n self.dock_pos = pos\r\n return self", "def go_left(self):\n self.rect.centerx -= 9", "def adjust_visual(self):\n\n if (self.direction is bs.Direction.LEFT):\n self.rect.x -= 0.5 * CELL_SIZE", "def wrap(self):\n if self.center.x > SCREEN_WIDTH:\n self.center.x = 0\n if self.center.y > SCREEN_HEIGHT:\n self.center.y = 0\n if self.center.x < 0:\n self.center.x = SCREEN_WIDTH\n if self.center.y < 0:\n self.center.y = SCREEN_HEIGHT", "def update(self):\n self.rect = (self.x, self.y, self.width, self.height)", "def init_frame(self):\n self._exit_button.grid(row=0, column=2, sticky=tk.W)\n self._clear_button.grid(row=0, column=0, sticky=tk.E)\n # self._copy_button.grid(row=0, column=1, sticky=(tk.W, tk.W))\n return None", "def boundary(self):\n if self.pos.x < 0:\n self.pos.x = 0\n if self.pos.x > WIDTH - 48:\n self.pos.x = WIDTH - 48\n if self.pos.y < 0:\n self.pos.y = 0\n if self.pos.y > HEIGHT - 48:\n self.pos.y = HEIGHT - 48\n\n self.rect.topleft = self.pos", "def init_infobox(self):\n infobox = tk.Label(self, text=\"\", justify=\"left\")\n infobox.grid(row=0, column=1, sticky=\"n\")\n self.infobox = infobox", "def updatePos(self):\n self.setPos(self.centerX-self.boundingRect().width()/2.0,\n self.centerY-self.boundingRect().height()/2.0)", "def position_window(self):\n x, y = self.get_position()\n root_x = self.anchor_widget.winfo_rootx() + x\n root_y = self.anchor_widget.winfo_rooty() + y\n self.tipwindow.wm_geometry(\"+%d+%d\" % (root_x, root_y))", "def center(self):\n if self.pos != 0.0:\n self.pos = 0.0", "def spacer(self):\n (outline,_,_) = self.bounds\n return Builder(self.box, (outline, -base_height, base_height))", "def clear(self):\r\n\t\tself.grid.fill(False)", "def front_wall(self):\n self.place = \"bed\"\n print(\"You are infront of the bed.\"\n \"You look under it and find a notebook.\")\n nb = Notebook('notebook')\n nb.clue()", "def placeWindow(self):\r\n\t\t# window size\r\n\t\tw = 600\r\n\t\th = 300\r\n\t\t# find the screen size\r\n\t\tsw = self.parent.winfo_screenwidth()\r\n\t\tsh = self.parent.winfo_screenheight()\r\n\t\t# now define the location on the current screen\r\n\t\tx = (sw/2-0.5*w)\r\n\t\ty = (sh/2-0.5*h)\r\n\t\tself.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def create_open_positions_grid(self):\n\n counter = 0\n col = 0\n row = 0\n\n for i in range(0, 99):\n if counter % 3 == 0:\n col = 0\n row += 1\n self.gp.addWidget(PositionPanel(), row, col)\n counter += 1\n col += 1", "def reset_position(self):\n self.set_position(copy.deepcopy(self.ab_pos))", "def delete_box(self) -> None:\n from pymol import cmd\n\n # Reset all box variables\n self.x = 0\n self.y = 0\n self.z = 0\n # self.min_x_set = 0.0\n # self.max_x_set = 0.0\n # self.min_y_set = 0.0\n # self.max_y_set = 0.0\n # self.min_z_set = 0.0\n # self.max_z_set = 0.0\n # self.angle1_set = 0.0\n # self.angle2_set = 0.0\n # self.padding_set = 3.5\n\n # Delete Box and Vertices objects in PyMOL\n cmd.delete(\"vertices\")\n cmd.delete(\"box\")\n\n # Set Box variables in the interface\n self.min_x.setValue(self._default.min_x)\n self.max_x.setValue(self._default.max_x)\n self.min_y.setValue(self._default.min_y)\n self.max_y.setValue(self._default.max_y)\n 
self.min_z.setValue(self._default.min_z)\n self.max_z.setValue(self._default.max_z)\n self.angle1.setValue(self._default.angle1)\n self.angle2.setValue(self._default.angle2)\n\n # Change state of buttons in the interface\n self.button_draw_box.setEnabled(True)\n self.button_redraw_box.setEnabled(False)\n self.min_x.setEnabled(False)\n self.min_y.setEnabled(False)\n self.min_z.setEnabled(False)\n self.max_x.setEnabled(False)\n self.max_y.setEnabled(False)\n self.max_z.setEnabled(False)\n self.angle1.setEnabled(False)\n self.angle2.setEnabled(False)", "def place(self,y,x):\n self.y = y\n self.x = x", "def update(self, pos):\n self.rect.topleft = (pos)", "def udpatePosition(self, screenSize):\n w, h = screenSize\n if self.bottom != None:\n self.rect.bottom = h - self.bottom\n else:\n self.rect.top = self.top\n if self.right != None:\n self.rect.right = w - self.right\n else:\n self.rect.left = self.left", "def update(self): \n super().update()\n if self.center_x < constants.left_limit:\n self.center_x = self.screen_width + constants.offscreen_space\n if self.center_x > self.screen_width + constants.offscreen_space:\n self.center_x = constants.left_limit\n if self.center_y > self.screen_height + constants.offscreen_space:\n self.center_y = constants.bottom_limit\n if self.center_y < constants.bottom_limit:\n self.center_y = self.screen_height + constants.offscreen_space", "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "def autostop():", "def move_west(self):\n self.horizontal = (self.horizontal * 2)[1:5]\n self.vertical[0] = self.horizontal[1]\n self.vertical[2] = self.horizontal[3]", "def create(self):\n self.panel = pg.rect.Rect(self.position, self.dimensions)", "def edit_widget_focus(self):\n if self.goto:\n self.goto_node()\n self.update_position(self.get_position())", "def center_mario(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x, self.y = float(self.rect.x), float(self.rect.y)", "def store_empty_graphic_box(self):\n for box in self.laby.empty_box():\n x = box[0] * 40\n y = box[1] * 40\n self.store_emptyBox.append((y, x))\n return self.store_emptyBox", "def correct_position(self):\n\n width = self.screen.get_width()\n height = self.screen.get_height()\n\n if self.last_screen_dimensions[\"width\"] > width:\n self.x -= self.last_screen_dimensions[\"width\"] - width\n\n if self.last_screen_dimensions[\"height\"] > height:\n self.y -= self.last_screen_dimensions[\"height\"] - height", "def centre(self):\n self.top.update_idletasks()\n # The horizontal position is calculated as (screenwidth - window_width)/2\n hpos = int((self.top.winfo_screenwidth() - self.top.winfo_width())/2)\n # And vertical position the same, but with the height dimensions\n vpos = int((self.top.winfo_screenheight() - self.top.winfo_height())/2)\n # And the move call repositions the window\n self.top.geometry('+{x}+{y}'.format(x=hpos, y=vpos))", "def draw(self):\n if self.master != None :\n fill = self.fill\n #fill = Cell.FILLED_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n\n #if not self.fill:\n # fill = Cell.EMPTY_COLOR_BG\n # outline = Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)", "def reset(self, playerx, playery):\n self.rect.center = (playerx, playery)", "def _blank_screen(self):\n self._screen.fill(self._bgcolor)\n pygame.display.update()", "def setBorder():\n dislin.pagera()", "def 
draw(self):\n if self.master != None :\n outline = Cell.FILLED_COLOR_BORDER if self.fill else Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = self.fill, outline = outline)", "def leftmakevisible(self, pos):\n pass", "def center_ava(self):\n\t\tself.rect.midbottom = self.screen_rect.midbottom\n\t\tself.x = float(self.rect.x)", "def update (self, pos):\n self.rect.topleft = (pos)", "def switch_origin(self):\n self.origin = 'bottom' if self.origin == 'top' else 'top'", "def int_33H_4(self):\r\n horizontal_position = self.registers['CX'].get_int(-1)\r\n vertical_position = self.registers['DX'].get_int(-1)\r\n print(horizontal_position, vertical_position)\r\n MainWindow.set_cursor_poisition(horizontal_position, vertical_position)", "def modal(self):\r\n self.win.move(0, 0)\r\n self.editing = True\r\n goxapi.start_thread(self.cursor_placement_thread, \"TextBox cursor placement\")\r\n self.value = self.box.edit(self.validator)\r\n self.editing = False\r\n return self.result", "def aktualisiere(self):\n if(self.zeichnung == None):\n self.zeichnung = self.leinwand.create_image(self.x, self.y, \n image=self.grafik)\n delta_x = self.zielX - self.x\n delta_y = self.zielY - self.y\n self.leinwand.move(self.zeichnung, delta_x, delta_y)\n\n self.x = self.zielX\n self.y = self.zielY", "def leftframeborderoff(cls):\n cls.left_frame['highlightthickness'] = 0", "def border(self):\n ...", "def clear_box():\n blank_labels.destroy()\n enter_button['state'] = NORMAL # enables the ENTER button\n text_box.config(state='normal') # enables the entry box\n text_box.delete(0, 'end') # clears the entry box", "def set_position(self, position):\r\n\r\n self.position = position\r\n if (self.rect):\r\n self.rect.x = position[0]\r\n self.rect.y = position[1]", "def set_box(self) -> None:\n from pymol import cmd\n\n # Delete Box object in PyMOL\n if \"box\" in cmd.get_names(\"selections\"):\n cmd.delete(\"box\")\n # Get dimensions of selected residues\n selection = \"sele\"\n if selection in cmd.get_names(\"selections\"):\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(selection)\n else:\n ([min_x, min_y, min_z], [max_x, max_y, max_z]) = cmd.get_extent(\"\")\n \n # Get center of each dimension (x, y, z)\n self.x = (min_x + max_x) / 2\n self.y = (min_y + max_y) / 2\n self.z = (min_z + max_z) / 2\n\n # Set Box variables in interface\n self.min_x.setValue(round(self.x - (min_x - self.padding.value()), 1))\n self.max_x.setValue(round((max_x + self.padding.value()) - self.x, 1))\n self.min_y.setValue(round(self.y - (min_y - self.padding.value()), 1))\n self.max_y.setValue(round((max_y + self.padding.value()) - self.y, 1))\n self.min_z.setValue(round(self.z - (min_z - self.padding.value()), 1))\n self.max_z.setValue(round((max_z + self.padding.value()) - self.z, 1))\n self.angle1.setValue(0)\n self.angle2.setValue(0)\n\n # Setting background box values\n self.min_x_set = self.min_x.value()\n self.max_x_set = self.max_x.value()\n self.min_y_set = self.min_y.value()\n self.max_y_set = self.max_y.value()\n self.min_z_set = self.min_z.value()\n self.max_z_set = self.max_z.value()\n self.angle1_set = self.angle1.value()\n self.angle2_set = self.angle2.value()\n self.padding_set = self.padding.value()\n\n # Draw box\n self.draw_box()\n\n # Enable/Disable buttons\n self.button_draw_box.setEnabled(False)\n self.button_redraw_box.setEnabled(True)\n 
self.min_x.setEnabled(True)\n self.min_y.setEnabled(True)\n self.min_z.setEnabled(True)\n self.max_x.setEnabled(True)\n self.max_y.setEnabled(True)\n self.max_z.setEnabled(True)\n self.angle1.setEnabled(True)\n self.angle2.setEnabled(True)", "def rebuild(self):\n self.set_image(self.ui_manager.get_universal_empty_surface())\n\n if self.text_block is not None:\n self.text_block.set_dimensions((self.rect_width, -1))\n\n self.relative_rect.height = self.text_block.rect.height\n self.relative_rect.width = self.text_block.rect.width\n self.rect.width = self.text_block.rect.width\n self.rect.height = self.text_block.rect.height", "def _place_board(self, board):\n for i, row in enumerate(board):\n for j, widget in enumerate(row):\n widget.grid(row = i, column = j)", "def makeBox(self) -> None:\n self.state[CASH] = self.state[CASH] + 1", "def set_top_widget(self, widg):\r\n if widg in self.widgets:\r\n self.widgets.remove(widg)\r\n self.widgets.insert(0, widg)\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if not i == widg:\r\n i.unfocus()" ]
[ "0.6971761", "0.67601514", "0.6497059", "0.6478759", "0.64567703", "0.6452853", "0.6386973", "0.63737553", "0.63284373", "0.63279796", "0.6298735", "0.62748224", "0.6256663", "0.62447906", "0.6181412", "0.61722535", "0.61675096", "0.61479", "0.61474043", "0.6145806", "0.6125969", "0.60712224", "0.60697937", "0.605309", "0.60522604", "0.60450625", "0.60426724", "0.60354", "0.6001044", "0.59960055", "0.59947354", "0.5982987", "0.5975372", "0.5970915", "0.594212", "0.59307194", "0.5927875", "0.5913266", "0.59091467", "0.58975226", "0.5881052", "0.58546305", "0.585256", "0.5841876", "0.5833262", "0.58020985", "0.57975924", "0.5790575", "0.57850343", "0.5777846", "0.5769525", "0.5759633", "0.57582283", "0.57459164", "0.5744507", "0.5744092", "0.5736396", "0.5731663", "0.57295656", "0.5728435", "0.5722651", "0.57147396", "0.56883126", "0.5688294", "0.566931", "0.56666553", "0.56637603", "0.5651052", "0.5642038", "0.5641802", "0.56346965", "0.5634098", "0.56319064", "0.562617", "0.56238014", "0.5615895", "0.5610655", "0.55967903", "0.5593471", "0.5587869", "0.5585209", "0.5577372", "0.5570864", "0.55708295", "0.556563", "0.5563126", "0.55584663", "0.55551875", "0.55470085", "0.5545766", "0.55388093", "0.55192536", "0.55174184", "0.55145913", "0.5508478", "0.5505768", "0.55008394", "0.55001456", "0.5497344", "0.5495213", "0.5492993" ]
0.0
-1
Return the content of a file as a string without newlines.
def read_file(file_path): file_string = '' with open(file_path, 'r', newline='') as file: for line in file: file_string = file_string + line.rstrip('\n') return file_string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file_content(filename):\n file_contents = ''\n with open(filename) as f:\n file_contents = f.read()\n return file_contents", "def get_file_content(self, file_name: str):\n file_name = Path(__file__).absolute().parents[1].joinpath(file_name)\n try:\n with file_name.open('r') as file:\n intermediate = file.readlines()\n return ''.join(intermediate)\n except FileNotFoundError as message:\n self.logger.error(message)\n return ''", "def file2str(file):\n with open(file, \"r\") as textFile:\n return textFile.read()", "def contents(file):\n with open(file) as f:\n return f.read()", "def read_file(file):\n try:\n with open(file, \"r\") as f:\n content = f.read().replace(\"\\n\", \"\")\n return content\n except:\n return f\"[ERROR]: could not open '{file}'\"", "def file_to_str(fname):\n data = None\n # rU = read with Universal line terminator\n with open(fname, 'rU') as f:\n data = f.read()\n return data", "def file_to_string(path_to_file):\n\t\twith open(path_to_file, 'r') as f:\n\t\t\tcontent = f.read()\n\t\treturn content", "def get_file_content(filename):\n fp = open(filename, \"r\")\n file_content = fp.read()\n fp.close()\n\n # if the last character is an end of line, remove it\n if file_content[-1] == '\\n':\n file_content = file_content[:-1]\n return file_content", "def file_to_string(file_name):\n with open(file_name, 'r') as f:\n text = f.read()\n # delete original file\n os.remove(file_name)\n return text", "def get_file_contents(filename):\n with open(filename, 'r') as f:\n content = f.read()\n return content", "def ReadFileIntoString(filepath):\n with open(filepath, 'r') as file_handle:\n contents = file_handle.read()\n return contents", "def read_file(file):\n with open(file, 'r') as f:\n file_string = f.read()\n return file_string", "def load_file():\n s = ''\n\n with open(FILE_PATH) as f:\n for line in f:\n # .rstrip method gets rid of new line \"\\n\" characters\n s = s + line.rstrip() \n return s", "def getFileContent(fileName, encoding = \"UTF-8\"):\n file = io.open(fileName, mode = \"r\", encoding = encoding)\n text = file.read()\n file.close()\n return text", "def get_content(filename=u''):\n if filename:\n inp = open(filename)\n else:\n inp = sys.stdin\n\n try:\n return unicode(inp.read(), encoding='utf-8')\n except KeyboardInterrupt:\n return u''", "def read_file(file_path, mode='r', encoding=\"utf-8\"):\n with codecs.open(file_path, mode, encoding=encoding) as fp:\n return fp.read().strip()", "def read_file(file) -> str:\n file = open(file, \"r\")\n my_string = file.read()\n return get_clean_text(my_string)", "def get_file_text(file_name):\n\tf = open(file_name, 'r')\n\ttext = f.read()\n\treturn text", "def open_and_read_file(file_path):\n\n # Open file and read into memory\n text = open(file_path).read().rstrip()\n\n # Replace newlines with space\n #text = text.replace('\\n', ' ')\n\n return text", "def readfile(filename):\n\n infile = open(filename, \"r\") # open file for reading\n\n # Use Python's file read function to read the file contents\n filetext = infile.read().splitlines()\n\n infile.close() # close the file\n\n return filetext # the text of the file, as a single string", "def read_file(self, file: Path) -> str:\n with open(file) as f:\n return f.read()", "def read_text_from_file(file_path):\n validate_txt_extension(file_path)\n with open(file_path) as file:\n # Read all the lines from the file, and concatenate into a single string.\n return ''.join([line for line in file])", "def read_file(file_path):\n\n text = ''\n with open(file_path, 'r') as file:\n 
for line in file.readlines():\n text += line\n return text", "def readContent(file):\n \n with open(file, \"r\", encoding = \"utf-8\") as f:\n return f.read()", "def read_file(file_name, enc=\"latin-1\"):\n f = open(file_name, \"r\", encoding=enc)\n content = \"\".join(f.readlines())\n f.close()\n return content", "def getFileContent(fn):\n content = None\n if not os.path.exists(fn):\n print(\"Can not open file \" + fn)\n else:\n with open(fn, \"rb\") as f:\n content = f.readlines()\n return content", "def read_text_file(str_name_file: str):\n content: str = ''\n with open(str_name_file, mode=\"r\", encoding='utf-8') as file:\n print(\"file being read: \" + str_name_file + \"\\n\")\n content = file.read()\n return content", "def txt2str(file: str) -> str:\n return get_first_line(file)", "def get_text_from_file(filepath):\n with open(filepath, 'r') as f:\n return f.read()", "def read_file(filename: str, mode: str = \"r\") -> str:\n with open(filename, mode) as file:\n file_content = file.read()\n return file_content", "def read_from_file(filename):\n with open(filename, 'r') as f:\n lines = [line for line in f]\n\n return \"\".join(lines)", "def ofile_string(self):\n fp = self.ofile_handle()\n if (fp):\n return \"\\n\".join([line.rstrip() for line in fp])\n return None", "def read_file(filename):\n return open(filename).read()", "def source_file_contents(source_dir, file_name):\n file_n_path = join(source_dir, file_name)\n with open(file_n_path, encoding='utf-8') as checked:\n return checked.read().rstrip()", "def slurp(path):\n with open(path) as f:\n return f.read().strip()", "def efile_string(self):\n fp = self.efile_handle()\n if (fp):\n return \"\\n\".join([line.rstrip() for line in fp])\n return None", "def read_file(self, file):\n buffer = ''\n for line in file.readlines():\n line = line.strip()\n if not line.startswith('#'):\n buffer += ' ' + line\n return buffer", "def read_file(file):\n with open(file, \"r\") as fid:\n return fid.read()", "def getFileContent(fileName):\n with open(fileName, \"r\") as targets:\n lines = targets.readlines()\n i = 0\n #Remove the \\n\n for line in lines:\n lines[i] = line.rstrip()\n i += 1\n return lines", "def file_contents(filename=None, content=None):\n logging.debug('file_contents()')\n if content:\n f = open(filename, 'w')\n f.write(content)\n f.close()\n \n try:\n f = open(filename, 'r')\n text = f.read()\n f.close()\n except IOError:\n text = None\n\n return text", "def file_to_string(file_path):\n data = ''\n try:\n with open(file_path, 'r') as file:\n data = file.read()\n file.close()\n except FileNotFoundError as err: # Sublime give an error, but it's not.\n print(Bcolors.FAIL + 'ERROR: ' + file_path + ' not found.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n except PermissionError as err:\n print(Bcolors.FAIL + 'ERROR: ' + file_path + ', Permission Denied.' 
+ Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n return data", "def fileGetContents(sFilename):\n with open(sFilename) as f:\n return f.read()", "def getOneFileContent(fpath):\n\twith open(fpath, 'r') as contentFile:\n\t\tdocStr = contentFile.read()\n\treturn docStr", "def read_file(filename):\n f = open(filename)\n contents = f.read()\n f.close()\n return contents", "def read_file(file_name):\n\n with open (file_name) as fd:\n content = fd.readlines()\n content = [x.strip() for x in content]\n return content", "def read_as_text(filename: str) -> str:\n with open(filename) as file_handle:\n txt = file_handle.read()\n return txt", "def openFile(filepath):\n assert checkExistenceFile(filepath), \"filepath does not exist\"\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n text = \" \".join(map(lambda x: x.rstrip(\"\\n\"), f.readlines()))\n return text", "def getFileContent(self, filePath, mode):\n with open(filePath, mode) as my_file:\n return my_file.read()", "def get_file_contents(file_name):\n\n\tf = open(file_name)\n\tlines = f.readlines()\n\tf.close()\n\treturn lines", "def contents(filepath):\n f = open(filepath, 'r')\n rval = [x.rstrip(\"\\r\\n\") for x in f.readlines()]\n f.close()\n return rval", "def read_file(filename=\"\"):\n with open(filename, 'r') as f:\n f_contents = f.read()\n print(f_contents, end='')", "def local_file_as_string(self, file_path):\n with open(file_path, 'rb') as file:\n string = file.read().decode('utf-8')\n return string", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()", "def _read_file(self, filePath):\n with open(filePath) as f:\n fileContent = f.read()\n f.close()\n return fileContent.strip()", "def replace_with_file_contents(fname):\n try:\n with open(os.path.expanduser(fname[0])) as source_file:\n result = source_file.read()\n except IOError:\n result = '< %s' % fname[0] # wasn't a file after all\n\n # TODO: IF pyparsing input parser logic gets fixed to support empty file, add support to get from paste buffer\n return result", "def read(file):\n with open(file, 'r') as file:\n return file.read()", "def readfile(filename):\n with open(filename, encoding=\"utf-8\") as file:\n raw = file.read()\n return raw", "def get_file_content(self):\n s = StringIO.StringIO()\n\n s.write(self.get_header())\n s.write(self.get_content())\n\n return s.getvalue()", "def _file_read(self, file: str) -> str:\n with open(f\"tests/resources/{file}\", \"r\") as fs:\n result = \"\\n\".join(fs.read().splitlines())\n return result", "def get_text(filename):\n with open(filename, 'r', encoding='utf-8') as file:\n file_text = file.read()\n return file_text", "def read_full_file(filename, options=\"rb+\"):\n with open(filename, options) as f:\n text = f.read()\n return text", "def read_raw(file_path):\n file = open(file_path, 'rb')\n content = file.read()\n file.close()\n return content", "def _(fname: str) -> str:\n with open(os.path.join(os.getcwd(), fname)) as readme:\n content = readme.read() or '' # prevent ``content = None``\n return content", "def readText(fileName):\n fileText = \"\"\n with open(fileName,\"r\") as fileObject:\n fileText = fileObject.read()\n \n return fileText", "def read_file(filename):\n fh = open(filename, \"r\")\n file_str = fh.read()\n file_str = re.sub(\"$\\n\", \"\", file_str)\n fh.close()\n return file_str", "def read_file(filename):\n with open(filename) as fp:\n return fp.read()", "def getFileContents(filename, mode=\"r\", encoding=None):\n\n with withFileLock(\"reading file %s\" % filename):\n with 
openTextFile(filename, mode, encoding=encoding) as f:\n return f.read()", "def SimpleRead(fn):\n content = \"\"\n try:\n content = open(fn).read()\n except :\n print(\"Failed to read file: %s\\n\"%(fn))\n print sys.exc_info()[1]\n\n return content", "def get_example_file(filename):\n fullpath = get_example_filepath(filename)\n # Need to ensure that CRCRLF remain intact\n with open(fullpath, \"rb\") as fh:\n data = fh.read().decode(\"utf-8\")\n return data", "def open_and_read_file(file_path):\n\n # Read the file, return text as a string titled \"contents\"\n contents = open(file_path).read()\n\n # Return contents of your file as one long string\n return contents", "def get_file_content(self):\n return \"\\n\".join(self._vim.current.buffer)", "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def readFile(fileName):\n with open(fileName, 'r', encoding='utf-8') as f:\n text = f.read()\n return text", "def load_text_data(filepath: str) -> str:\n with open(filepath) as text_file:\n text_from_file = text_file.read()\n\n return str.strip(text_from_file) # str.strip removes leading/trailing whitespace", "def get_file_content(path_str, encoding=\"utf8\"):\n try:\n with open(path_str, mode=\"r\", encoding=encoding) as file_handle:\n file_content = file_handle.read()\n return file_content\n if not file_content:\n logging.info(\"File %s was empty\", )\n return \"\"\n except IOError as io_err:\n logging.info(\"I/O error while reading %s code:%s, error: %s\",\n path_str, io_err.errno, io_err.strerror)", "def read_file(filepath: str) -> str:\n with open(filepath, \"r\") as filep:\n return filep.read()", "def read_file(filename):\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content", "def get_content_from_file(path):\n\n\t\tPathUtil.ensure_path_exists(path)\n\t\twith open(path) as file:\n\t\t\tfile_content = file.read()\n\t\treturn file_content", "def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def read_file(file):\n f = open(file, \"r\", encoding=\"utf8\")\n return f.read()", "def read_file(filename):\n open_kwargs = {}\n if sys.version_info.major == 3:\n open_kwargs = {'encoding': 'utf-8'}\n\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n with open(filepath, **open_kwargs) as filecontents:\n return filecontents.read()", "def read_file(file_name):\n with open(file_name, 'r') as f:\n return f.read()", "def GetFileContents(self, filename):\n logging.debug(\"GetFileContents(%s)\" % (filename))\n with tempfile.NamedTemporaryFile(mode='w') as t:\n self.GetFile(filename, t.name)\n with open(t.name, 'r', encoding='UTF-8') as f2:\n res = f2.read()\n logging.debug(\"GetFileContents(%s)->%s\" % (filename, res))\n return res", "def read_file(path):\n try:\n with open(path, 'r') as text_file:\n return \"\".join(text_file.readlines()).strip()\n except IOError:\n exit(\"Error: file '%s' is not readable!\" % path)", "def read_data() -> str:\n with open('input.txt') as input_file:\n return input_file.read()", "def read_file(file_name):\n with open(file_name, \"r\") as f:\n return f.read()", "def readFile(self, name):\n\t\ttry:\n\t\t\tf = open(name, 'r')\n\t\t\tlines = f.readlines()\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\treturn None\n\n\t\treturn join(lines, \"\")", "def read_file(file_path):\n contents = None\n # opens the file and by default it sends 'r' as mode, so, it ensures the file will be used only for 
reading,\n # and not writing, since it's not necessary\n file_to_read = open(file_path)\n if file_to_read.mode == 'r':\n # saves the text of the file into a variable\n contents = file_to_read.read()\n\n # close the file\n file_to_read.close()\n return contents", "def read_file(self, file_name: str):\n file_text = []\n with open(file_name, encoding='utf-8', errors='ignore') as file:\n for line in file:\n line = line.strip()\n file_text.append(line)\n return file_text", "def get_file_contents(path):\n try:\n with open(path) as f:\n return f.read()\n except IOError:\n return None", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read().strip()", "def read_file(filename):\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n try:\n return open(filepath).read()\n except:\n return ''", "def _read_file(fname):\n with open(fname) as fobj:\n for line in fobj:\n yield _tostr(line).strip()", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def read_file(filename):\n path = os.path.abspath(os.path.dirname(__file__))\n filepath = os.path.join(path, filename)\n try:\n return open(filepath).read()\n except IOError:\n return ''", "def read_file(self, file_name):\n\n with open(file_name, 'r') as file_input:\n file_content = file_input.read()\n return file_content", "def readfile(file):\n with open(file, 'r') as f:\n data = f.read().splitlines()\n return data", "def _get_string(self):\n result = self.sfile.readline().rstrip('\\n')\n return result", "def get_file_contents(directory, filename):\n full_path = \"%s/%s\" % (directory,filename)\n content = \"\"\n if os.path.exists(full_path):\n fp = open(full_path, \"r\")\n content = fp.read()\n fp.close()\n return content" ]
[ "0.7609623", "0.75446004", "0.7528579", "0.7510224", "0.7494273", "0.7474576", "0.74162525", "0.7388739", "0.7182413", "0.71716714", "0.71541095", "0.7126843", "0.7116395", "0.7087866", "0.704822", "0.69866985", "0.6974244", "0.69514704", "0.69032836", "0.68963784", "0.6877578", "0.6859337", "0.6854062", "0.6849453", "0.68491906", "0.6848757", "0.6813867", "0.67990494", "0.67973983", "0.67950964", "0.6790438", "0.6782802", "0.67803746", "0.6775408", "0.6773612", "0.6755686", "0.6749821", "0.6748313", "0.6742843", "0.67362195", "0.67335373", "0.6727873", "0.671142", "0.67064434", "0.66838276", "0.6666997", "0.6661014", "0.6636973", "0.66255593", "0.6598299", "0.6595789", "0.65856916", "0.6566628", "0.6546741", "0.6543496", "0.6540937", "0.6528089", "0.65234345", "0.6519846", "0.6514002", "0.65065306", "0.6504732", "0.65044385", "0.650228", "0.6487045", "0.64852643", "0.6474215", "0.6471169", "0.64631355", "0.64627254", "0.64406574", "0.6433736", "0.6430705", "0.6428006", "0.64068455", "0.64015317", "0.6400229", "0.6396225", "0.63745415", "0.637422", "0.6363607", "0.63624185", "0.63613933", "0.63583237", "0.63498133", "0.6346441", "0.63392925", "0.6335438", "0.63175714", "0.63043416", "0.62988037", "0.6289452", "0.6283306", "0.6269468", "0.62627125", "0.6259178", "0.6256938", "0.62540144", "0.6242426", "0.62372077" ]
0.7359542
8
plot the timestamped data for the temperature
def find_records(): print("begin find records") study_list = retrieve_ref('study_list') sensor_list = retrieve_ref('sensor_list') # sensor_unit_list = retrieve_ref('sensor_unit_list') for study in study_list: # print('study = ' + str(study)) source_path = os.path.join(study, 'source') # print('source_path = ' + str(source_path)) source_folders = os.listdir(source_path) # print(str(study) + ' source_folders = ') # print(source_folders) df_meta = pd.DataFrame() df_meta['source_path'] = source_folders save_meta(study, df_meta) record_to_summary(study, 'Records found', str(len(source_folders))) print("completed find records")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_temp():\r\n work_book = xlrd.open_workbook(\"Temp.xls\")\r\n sheet1 = work_book.sheet_by_name(\"Temperature\")\r\n time_x = sheet1.col_values(1)\r\n temp_y = sheet1.col_values(0)\r\n plt.title(\"Time\")\r\n plt.xlabel(\"Time\")\r\n plt.ylabel(\"Temperature\")\r\n plt.plot(time_x, temp_y)\r\n plt.show()", "def plot_temperature(timestamps,timelabels,temperatures):\n\n #into x,y data and 2nd column as the x-axis tick\n TOOLS = \"pan,wheel_zoom,box_zoom,reset,save,box_select,lasso_select\"\n p = plt.figure(title=\"Christchurch Temperature\", tools=TOOLS,\n x_axis_label='Record Time', y_axis_label='Temperature(\"C)')\n\n # add a line renderer with legend and line thickness\n\n p.xaxis.ticker = timestamps\n p.xaxis.major_label_overrides=(dict(zip(timestamps,timelabels)))\n p.xaxis.major_label_orientation = pi/2\n p.xaxis.ticker.desired_num_ticks = 1\n\n p.line(timestamps,temperatures, legend_label=\"Temperature\", line_width=2)\n\n from bokeh.resources import CDN\n from bokeh.embed import components\n script, div = components(p)\n \n return get_bokeh_plot_head(), script, div", "def get_temp():\n epts = [\"cage_coldPlate_temp\", \"cage_pressure\"]\n # t_earlier_aug = '2019-10-02T00:00'\n # t_later_aug = datetime.utcnow().isoformat()\n t_earlier_aug = '2019-09-27T13:00'\n t_later_aug = '2019-09-28T19:49'\n dfs = pandas_db_query(epts, t_earlier_aug, t_later_aug)\n print(dfs[epts[0]].tail())\n\n exit()\n\n xv = dfs[epts[0]][\"timestamp\"]\n yv = dfs[epts[0]][epts[0]]\n plt.plot(xv, yv, '-b')\n plt.ylabel(epts[0], ha='right', y=1)\n\n p1a = plt.gca().twinx()\n xv = dfs[epts[1]][\"timestamp\"]\n yv = dfs[epts[1]][epts[1]]\n p1a.set_ylabel(epts[1], color='r', ha='right', y=1)\n p1a.tick_params('y', colors='r')\n p1a.semilogy(xv, yv, '-r')\n\n plt.gcf().autofmt_xdate()\n plt.tight_layout()\n plt.show()", "def plot_humidity(timestamps,timelabels,humidities):\n\n #into x,y data and 2nd column as the x-axis tick\n TOOLS = \"pan,wheel_zoom,box_zoom,reset,save,box_select,lasso_select\"\n p = plt.figure(title=\"Christchurch Humidity\", tools=TOOLS,\n x_axis_label='Record Time', y_axis_label='Humidity(%)')\n\n # add a line renderer with legend and line thickness\n\n p.xaxis.ticker = timestamps\n p.xaxis.major_label_overrides=(dict(zip(timestamps,timelabels)))\n p.xaxis.major_label_orientation = pi/2\n p.xaxis.ticker.desired_num_ticks = 1\n\n p.line(timestamps,humidities, legend_label=\"Humidity\", line_width=2)\n\n from bokeh.resources import CDN\n from bokeh.embed import components\n script, div = components(p)\n \n return get_bokeh_plot_head(), script, div", "def temp_plot(city):\n\n API_key = 'Api key here' # You can get one free from: https://openweathermap.org/appid\n owm = OWM(API_key)\n fc = owm.three_hours_forecast(city)\n f = fc.get_forecast()\n weather_lst = f.get_weathers()\n\n weather_days = [weather_lst[i] for i in range(0, len(weather_lst), 8)]\n temps = [i.get_temperature(unit='celsius')['temp'] for i in weather_days]\n dates = [i.get_reference_time(timeformat='iso').split(\" \")[0] for i in weather_days]\n\n plt.plot(dates, temps, marker='o')\n plt.title('Average daily temperature in {}'.format(city))\n plt.xlabel('Day')\n plt.ylabel('Temperature °C')\n plt.axis(ymin=0, ymax=30)\n plt.show()", "def plot_tseries(*args, **kwargs) :\n data = kwargs.pop('data')\n return data.dropna().plot(x=args[0], y=args[1], **kwargs)", "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de 
tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()", "def plot_time(self, X, x0, t):\n\n Pressure = [Solution(self, (x-x0)/t).pressure for x in X]\n Velocity = [Solution(self, (x-x0)/t).velocity for x in X]\n Density = [Solution(self, (x-x0)/t).rho for x in X]\n\n fig, axs = plt.subplots(3, sharex=True)\n fig.suptitle(\"Solution of the Riemann problem\\nat t = {}s\".format(t))\n axs[0].plot(X, Density)\n axs[1].plot(X, Velocity)\n axs[2].plot(X, Pressure)\n\n axs[0].grid()\n axs[0].set(ylabel = \"Density\")\n axs[1].grid()\n axs[1].set(ylabel = \"Velocity\")\n axs[2].grid()\n axs[2].set(ylabel = \"Pressure\")\n\n plt.xlabel(\"Location x\")", "def plot_time_series(df, plt):\n # Remove the plot frame lines\n delta = (df[\"timestamp\"].max() - df[\"timestamp\"].min()) / 10\n plt.xticks(\n np.arange(\n df[\"timestamp\"].min(),\n df[\"timestamp\"].max(),\n step=np.around(delta, decimals=1),\n )\n )\n plt.grid()", "def plot_data(self):", "def plot(self, add_labels=False,\n _type=\"scatter\", color=None,\n layer=1, edgecolor='black'):\n _type = _type.lower()\n assert _type in (\"scatter\", \"line\")\n if _type == \"scatter\":\n scatter(\n self.times,\n self.temperatures,\n alpha=.8,\n zorder=layer,\n edgecolors=edgecolor\n )\n elif _type == \"line\":\n plt.plot(self.times,\n self.temperatures,\n \"-\", color=color,\n zorder=layer)\n if add_labels:\n self.set_plot_labels()", "def plot(self, x_values=None, y_values=None,\n x_experiment_values=None, y_experiment_values=None) -> None:\n\n # TODO: make the plot at the beginning to say something, like \"Press RUN\"\n self.temperature_subplot.cla()\n self.temperature_subplot.set_title('Temperature plot')\n self.temperature_subplot.set_xlabel(\"Time [s]\")\n self.temperature_subplot.set_ylabel(\"Temperature [°C]\")\n\n # In both cases of plotting we have to make sure we plot the\n # data with the same dimensions, therefore we first determine\n # she shortest array and plot just that data\n\n if x_values is not None and y_values is not None:\n min_length = min(len(x_values), len(y_values))\n self.temperature_subplot.plot(x_values[:min_length],\n y_values[:min_length],\n label='Calculated Data',\n color=\"blue\")\n\n if x_experiment_values is not None and y_experiment_values is not None:\n min_length = min(len(x_experiment_values), len(y_experiment_values))\n self.temperature_subplot.plot(x_experiment_values[:min_length],\n y_experiment_values[:min_length],\n label='Experiment Data',\n color=\"orange\")\n\n self.temperature_subplot.legend()\n self.draw()", "def plot_data_timeseries(X, output_file=None):\n\n n_features = X.shape[1]\n\n fig, ax = plt.subplots(nrows=n_features, figsize=(9, 4 * n_features),\n sharex=True, squeeze=False)\n\n for i in range(n_features):\n ax[i, 0].plot(X[:, i], '-')\n\n ax[i, 0].set_ylabel(r'$x_{:d}$'.format(i + 1))\n\n ax[i, 0].grid(ls='--', color='gray', alpha=0.5)\n\n if i == n_features - 1:\n ax[i, 0].set_xlabel('Time')\n\n if output_file is not None and output_file:\n plt.savefig(output_file, bbox_inches='tight')\n\n plt.show()", "def plot(axes, axis, values, c='chartreuse'):\n a = axes[axis]\n a.set_xlabel('time (s)')\n x = np.array(range(len(values))) / 1000\n dim = 'x' if axis == 0 else 'y' if axis == 1 else 'z'\n a.set_title('-'.join([dim, 'acceleration']))\n a.plot(x, values / 1000, c=c)", "def PlotTimes(metadata, data):\n\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp.clear()\n gp.xlabel('seconds')\n 
gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n styles = {}\n line_style = 1\n\n for dataset in data:\n x = numpy.array(dataset.time, dtype='float_')\n if not dataset.name in styles:\n styles[dataset.name] = line_style\n line_style += 1\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='impulses ls %d' % styles[dataset.name])\n else: # no need to repeat a title that exists already.\n d = Gnuplot.Data(x, dataset.data,\n with_='impulses ls %d' % styles[dataset.name])\n\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')", "def plot_x(t, x):\n plt.figure()\n plt.plot(t, x)\n plt.title(\"Vertical position of the skydiver as a function of time\")\n plt.xlabel(\"Time t [s]\")\n plt.ylabel(\"Height [m]\")\n plt.savefig('Parachute_position.png')", "def display_plot(self, parameter):\n values = list(self.dataframe[parameter])\n #Begining and ending date of the dataset\n beg = self.beg\n end = self.end\n #Settings of the plot\n if parameter == 'temperature':\n #Differienciate the color of the points according to temeprature rule\n import matplotlib as mpl\n cmap = mpl.colors.ListedColormap(['blue', 'yellow', 'orange', 'red'])\n c_norm = mpl.colors.BoundaryNorm(boundaries=[-30,0,15,25,45], ncolors=4)\n plt.scatter(time_range, values, s=0.3, c=values, cmap=cmap, norm=c_norm)\n plt.colorbar()\n else:\n plt.plot(time_range, values, linewidth=0.2)\n plt.xlabel('from {} to {}'.format(beg, end))\n plt.ylabel(parameter)\n plt.title('Weather historical data')\n plt.grid(True)", "def plot2dTimeSeries(values, title='series', xLabel='time', yLabel='values', savePath='.'):\n plt.plot(values)\n plt.ylabel(yLabel)\n plt.xlabel(xLabel)\n plt.xticks(np.linspace(0, len(values), 11))\n plt.title(title)\n plt.savefig(f'{savePath}/{title}.png')\n plt.show(block=False)\n plt.pause(2)\n plt.close()", "def plot_series(self, t1=0, t2=100, t1p=None, t2p=None):\n \n plot_discretized(self.ts, self.ts_dis, t1=t1, t2=t2, t1p=t1p, t2p=t2p)", "def plot_raw_eeg_data(time_data, eeg_data):\n plt.plot(time_data, eeg_data, 'g-')\n plt.xlabel(\"time [secs]\")\n plt.ylabel(\"raw EEG values\")\n plt.title(\"EEG Data\")\n # plt.xlim(min(x_data) - 1, max(x_data) + 1)\n # plt.ylim(min(y_data) - 1, max(y_data) + 1)\n plt.show()", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg 
Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def plot_timeseries(self, x, *a, **kw):\n v, t = self.timeseries(x, **kw)\n utcoffset = kw.pop('utcoffset', None)\n if utcoffset is not None: # temporary hack because plot_date seems to be ignoring tz kwarg...\n t += utcoffset/24.\n for k in ('convert', 'timeslice', 'rmnans'): \n trash = kw.pop(k, None)\n if not a: a = '-' # plot a line by default\n if 'label' not in kw: \n kw.update({'label': x.replace('platform ','').replace('_',' ')})\n if 'axes' in kw: # deal with possible bug in plot_date?\n ax = kw.pop('axes')\n ax.plot_date(t, v, *a, **kw)\n ax.set_xlim(ax.xaxis.get_data_interval()) # update time limits\n else: # just make a new axis\n plt.plot_date(t, v, *a, **kw)\n ax = plt.gca()\n plt.gcf().autofmt_xdate()\n return ax", "def plot_tariff(self):\n\t\tplt.figure(1)\n\t\tplt.cla() # clear the plotting window to allow for re-plotting\n\n\t\tif self.chargetypetoplot.get() == 'energy':\n\t\t\ttoplot = []\n\t\t\tenergy_filter = ('energy' == self.data[\"Charge\"]) | ('Energy' == self.data[\"Charge\"])\n\t\t\tfor mo in range(1, 13):\n\t\t\t\tmonth_filter = (mo >= self.data[\"Start Month\"]) & (mo <= self.data[\"End Month\"])\n\t\t\t\ttemp = self.data.loc[(energy_filter & month_filter), :]\n\t\t\t\ttoplot.append([sum(temp.loc[(hr >= temp['Start Time']) & (hr <= temp['End Time']), \"Value\"])\n\t\t\t\t\t\t\t for hr in range(1, 25)])\n\t\t\tim = plt.imshow(toplot, interpolation='nearest')\n\t\t\tplt.xticks(ticks=[i-.5 for i in range(25)],\n\t\t\t\t\t labels=['{}:00'.format(str(j).zfill(2)) for j in range(25)], rotation=45)\n\t\t\tplt.yticks(ticks=[i-0.5 for i in range(13)],\n\t\t\t\t\t labels=['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',\n\t\t\t\t\t\t\t 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', ''], va='top')\n\n\t\t\t# get the colors of the values, according to the\n\t\t\t# colormap used by imshow\n\t\t\tvalues = np.unique([toplot[i][j] for i in range(12) for j in range(24)])\n\t\t\tcolors = [im.cmap(im.norm(value)) for value in values]\n\t\t\t# create a patch (proxy artist) for every 
color\n\t\t\tpatches = [mpatches.Patch(color=colors[i], label=\"${:1.5}/kWh\".format(values[i]))\n\t\t\t\t\t for i in range(len(values))]\n\t\t\t# put those patched as legend-handles into the legend\n\t\t\tplt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=\"upper right\", borderaxespad=0.)\n\t\t\tplt.title('Energy Price Heatmap')\n\t\telse:\n\t\t\ttoplot = []\n\t\t\tdemand_filter = ('demand' == self.data[\"Charge\"]) | ('Demand' == self.data[\"Charge\"])\n\t\t\tfor mo in range(1, 13):\n\t\t\t\tmonth_filter = (mo >= self.data[\"Start Month\"]) & (mo <= self.data[\"End Month\"])\n\t\t\t\ttemp = self.data.loc[(demand_filter & month_filter), :]\n\t\t\t\ttoplot.append([sum(temp.loc[(hr >= temp['Start Time']) & (hr <= temp['End Time']), \"Value\"]) for hr in\n\t\t\t\t\t\t\t range(1, 25)])\n\t\t\tim = plt.imshow(toplot, interpolation='nearest')\n\t\t\tplt.xticks(ticks=[i - .5 for i in range(25)], labels=['{}:00'.format(str(j).zfill(2)) for j in range(25)],\n\t\t\t\t\t rotation=45)\n\t\t\tplt.yticks(ticks=[i - 0.5 for i in range(13)],\n\t\t\t\t\t labels=['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', ''],\n\t\t\t\t\t va='top')\n\n\t\t\t# get the colors of the values, according to the\n\t\t\t# colormap used by imshow\n\t\t\tvalues = np.unique([toplot[i][j] for i in range(12) for j in range(24)])\n\t\t\tcolors = [im.cmap(im.norm(value)) for value in values]\n\t\t\t# create a patch (proxy artist) for every color\n\t\t\tpatches = [mpatches.Patch(color=colors[i], label=\"${:1.5}/kW\".format(values[i])) for i in\n\t\t\t\t\t range(len(values))]\n\t\t\t# put those patched as legend-handles into the legend\n\t\t\tplt.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc=\"upper right\", borderaxespad=0.)\n\t\t\tplt.title('Demand Price Heatmap')\n\n\t\tself.chart_type.draw()\n\n\t\treturn 0", "def plot_tseries(time_series, fig=None, axis=0,\r\n xticks=None, xunits=None, yticks=None, yunits=None,\r\n xlabel=None, ylabel=None, yerror=None, error_alpha=0.1,\r\n time_unit=None, **kwargs):\r\n\r\n if fig is None:\r\n fig = plt.figure()\r\n\r\n if not fig.get_axes():\r\n ax = fig.add_subplot(1, 1, 1)\r\n else:\r\n ax = fig.get_axes()[axis]\r\n\r\n #Make sure that time displays on the x axis with the units you want:\r\n #If you want to change the time-unit on the visualization from that used to\r\n #represent the time-series:\r\n if time_unit is not None:\r\n tu = time_unit\r\n conv_fac = ts.time_unit_conversion[time_unit]\r\n #Otherwise, get the information from your input:\r\n else:\r\n tu = time_series.time_unit\r\n conv_fac = time_series.time._conversion_factor\r\n\r\n this_time = time_series.time / float(conv_fac)\r\n ax.plot(this_time, time_series.data.T, **kwargs)\r\n\r\n if xlabel is None:\r\n ax.set_xlabel('Time (%s)' % tu)\r\n else:\r\n ax.set_xlabel(xlabel)\r\n\r\n if ylabel is not None:\r\n ax.set_ylabel(ylabel)\r\n\r\n if yerror is not None:\r\n if len(yerror.data.shape) == 1:\r\n this_e = yerror.data[np.newaxis, :]\r\n else:\r\n this_e = yerror.data\r\n delta = this_e\r\n e_u = time_series.data + delta\r\n e_d = time_series.data - delta\r\n for i in range(e_u.shape[0]):\r\n ax.fill_between(this_time, e_d[i], e_u[i], alpha=error_alpha)\r\n\r\n return fig", "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets arrive time", "def plot_energy_temperature_history(building, temp_df, prognosis_df, out):\n heat_data = 
building[list(month_range(building.heating_start, building.heating_stop))]\n df = pd.merge(heat_data, temp_df[\"avg_temp\"], left_index=True, right_index=True)\n # Now to fill the dots in between if data has holes in the end\n for d in month_range(building.heating_stop, prognosis_df.index[-1]):\n if not d in df.index:\n df.loc[d] = np.nan\n # Add prognosis\n df = df.merge(right = prognosis_df, left_index = True, right_index = True, how = 'left')\n # The query results in some random column name for heat values, fix it:\n df.rename(columns={heat_data.name: 'value'}, inplace = True)\n\n # Change to datetime\n df.rename(index = lambda s: datetime.datetime.fromisoformat(s), inplace = True)\n fig = Figure()\n canvas = FigureCanvas(fig)\n ax = fig.add_subplot(111)\n # Heating actualized\n ax.plot(df[\"value\"], 'r-')\n ax.set_ylabel(\"Heat energy usage by month, KWh\")\n ax.legend(\"Energy\", loc=\"upper left\")\n # Prognosis\n ax.plot(df.heating, 'r:')\n # Set year formatting\n ax.xaxis.set_major_locator(mdates.YearLocator())\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))\n ax.xaxis.set_minor_locator(mdates.MonthLocator())\n\n # Temperatures\n ax2 = ax.twinx()\n fig.gca().invert_yaxis()\n ax2.plot(df[\"avg_temp\"], 'g-', alpha=0.7)\n ax2.set_ylabel(\"Average temperature, °C\")\n ax2.legend(\"Temp (inv)\")\n\n fig.tight_layout()\n fig.autofmt_xdate()\n fig.savefig(out)", "def print_datapoint(parts):\n t = datetime.fromtimestamp(parts[0])\n humid = parts[1]\n temp_c = parts[2]\n temp_f = parts[3]\n heat_c = parts[4]\n heat_f = parts[5]\n print output_tmpl % (t, humid, temp_c, temp_f, heat_c, heat_f)", "def plot_temperatures_by_country(values, country, start, end):\r\n\r\n filtered = values.loc[(values['Country'] == country) &\r\n (values['dt'] >= start) &\r\n (values['dt'] <= end)]\r\n\r\n # x axis values\r\n x1 = filtered['dt']\r\n # corresponding y axis values\r\n y1 = filtered['AverageTemperature']\r\n\r\n # plotting the points\r\n plt.plot(x1, y1, label = \"line 1\")\r\n\r\n filtered = values.loc[(values['Country'] == country) &\r\n (values['dt'] >= '1973-01-01') &\r\n (values['dt'] <= '1974-01-01')]\r\n\r\n # x axis values\r\n x2 = filtered['dt']\r\n # corresponding y axis values\r\n y2 = filtered['AverageTemperature']\r\n\r\n # plotting the points\r\n plt.plot(x2, y2, label=\"line 2\")\r\n\r\n # naming the x axis\r\n plt.xlabel('x - axis - date')\r\n # naming the y axis\r\n plt.ylabel('y - axis - temperature')\r\n\r\n plt.title('Temperatures from ' + start + ' to ' + end + ' for ' + country)\r\n\r\n # function to show the plot\r\n plt.show()", "def one_period_plot():\n file = \"Data/matfiles/20131221.mat\"\n object = MatReader(file)\n\n NeA = object.NeA\n latA = object.latA\n times = object.secondsA\n mlt = object.mltA\n ind1 = 2606 #lat inds\n ind2 = 13940 #lat inds\n \n ind1 = 3197 #mlat inds\n ind2 = 14390 #mlat inds\n \n T = ind2 - ind1\n ind1 += int(T/2)\n ind2 += int(T/2)\n\n latA = latA[ind1:ind2]\n NeA = NeA[ind1:ind2]\n # NeA = object.meanie(NeA, 5)\n times = times[ind1:ind2]\n mlt = mlt[ind1:ind2]\n mlt = hour_round(mlt)\n\n lats = np.zeros_like(latA)\n lats[0] = latA[0]\n for i in range(len(latA)-1):\n dlat = latA[i+1] - latA[i]\n if dlat < 0:\n lats[i+1] = lats[i] - dlat\n else:\n lats[i+1] = lats[i] + dlat\n\n lats += 90\n\n xticks = np.array([-90, -70, -30, 30, 70, 110, 150, 210, 250, 270]) + 90\n gridticks = np.array([-90, -70, -30, 30, 70, 77, 103, 110, 150, 210, 250, 270]) + 90\n # plt.plot(lats, NeA, \".\", markersize = 1)\n # plt.plot([0, 0], [0, np.max(NeA)], 
\"k\")\n # plt.plot([30, 30], [0, np.max(NeA)], \"k\")\n # plt.plot([60, 60], [0, np.max(NeA)], \"k\")\n # plt.plot([120, 120],[0, np.max(NeA)], \"k\")\n # plt.plot([150, 150], [0, np.max(NeA)], \"k\")\n # plt.plot([167, 167], [0, np.max(NeA)], \"k\")\n # plt.plot([193, 193], [0, np.max(NeA)], \"k\")\n # plt.plot([210, 210], [0, np.max(NeA)], \"k\")\n # plt.plot([240, 244], [0, np.max(NeA)], \"k\")\n # plt.plot([300, 300], [0, np.max(NeA)], \"k\")\n # plt.plot([330, 330], [0, np.max(NeA)], \"k\")\n # plt.plot([360, 360], [0, np.max(NeA)], \"k\")\n # plt.xticks(xticks)\n # plt.xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n # plt.ylabel(\"Electron density [cm$^{-1}$]\")\n # plt.title(\"One SWARM satellite period\")\n # plt.grid(\"on\", axis = \"x\", xdata = gridticks)\n #adding letters\n x = (gridticks[:-1] + gridticks[1:])/2 - 3\n y = np.zeros_like(x) - np.max(NeA)/40\n s = [\"S\", \"B\", \"A\", \"B\", \"C\", \"D\", \"C\", \"B\", \"A\", \"B\", \"S\"]\n # for i in range(len(x)):\n # plt.text(x[i], y[i], s[i], fontsize = 10)\n # plt.savefig(\"Figures/swarm_period.pdf\")\n # plt.show()\n\n # plt.plot(times, latA)\n # plt.plot(times, mlt)\n # plt.show()\n print(lats[0])\n print(lats[-1])\n \n fig, ax = plt.subplots()\n ax.plot(lats, NeA, \".\", markersize = 1)\n ax.set_xticks(xticks, minor=False)\n ax.set_xticks([167, 193], minor=True)\n ax.xaxis.grid(True, which = \"major\")\n ax.xaxis.grid(True, which = \"minor\")\n for i in range(len(x)):\n ax.text(x[i], y[i], s[i], fontsize = 10)\n ax.set_xlabel(\"Geomagnetic latitude going from 0 to 360 degrees, starting and ending at south pole\")\n ax.set_ylabel(\"Electron density [cm$^{-1}$]\")\n ax.set_title(\"One Swarm satellite period\")\n # plt.savefig(\"Figures/swarm_period.pdf\")\n plt.show()\n plt.plot(mlt, NeA)\n plt.show()\n plt.plot(mlt, lats)\n plt.show()", "def temperature_graph(request, temperature_graph_id):\n\n # Update sensor before viewing\n if not updatesensor(sensor_id):\n messages.info(request, 'Unable to update sensor')\n\n temperature_graph_plot = create_temperature_graph(temperature_graph_id)\n\n context = {\n 'temperature_graph_plot': temperature_graph_plot,\n }\n return render(request, 'open_air/graphs.html', context)", "def __plot(data, days: int = None):\n if days is not None:\n points = days * 144\n else:\n points = len(data)\n\n temp = data[-points:, 1]\n\n plt.plot(range(points), temp)\n plt.grid()\n plt.show()", "def __plot_T__(self, refresh=False, *args):\n # If plot is not requested, return:\n if not self.plotTeVar.get() or not self.plotTiVar.get():\n return\n\n # Check for a closed window:\n if 'T' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['T'].number):\n del self.plots['T']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'T' in self.plots.keys()\n if refresh:\n if 'T' in self.plots.keys():\n fig = self.plots['T']\n fig = matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a Tew window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('T, time = ' + '{:.3f}'.format(1e9*self.imp.t(self.it)))\n ax = fig.add_subplot(111)\n\n # Plot:\n if self.plotTeVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.Te((self.it), self.ir)[0], 'r-', label='e')\n if self.plotTiVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.Ti((self.it), self.ir)[0], 'b-', label='i')\n\n ax.set_xlabel('r (um)', fontsize=12)\n ax.set_ylabel('T 
(keV)', fontsize=12)\n ax.legend()\n\n if self.logxVar.get():\n ax.set_xscale('log')\n if self.logyVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n fig.show()\n fig.canvas.draw()\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n self.plots['T'] = fig", "def demo(self, tmin=0, tmax=27.4, cadence=30.0 / 60.0 / 24.0, offset=0, raw=False, ax=None):\n t = np.arange(tmin, tmax, cadence)\n if ax is None:\n plt.figure('demo', figsize=(8, 3))\n else:\n plt.sca(ax)\n y = self.model(t)\n if raw:\n plt.plot(t, y + offset, alpha=0.25, linewidth=1, color='royalblue')\n plt.plot(t, self.integrated(t) + offset, alpha=0.5, linewidth=1, color='darkorange')\n plt.xlim(tmin, tmax)\n # plt.ylim(np.max(y)+0.01, np.min(y)-0.01)\n plt.xlabel('Time (days)')\n plt.ylabel('Flux (mag.)')", "def visualize_signal(self):\n plt.figure()\n plt.title('Accelerometer Signal')\n plt.plot(range(len(self.data)), self.data[1])", "def temperature_animation(location_name=None, total_time=24 * 365, dt=3600.0, init_temp=273.0, burn_in=0, plot_burn_in=False, hw=10,\r\n dest=None, fps=24, const_albedo=None):\r\n fig = plt.figure(constrained_layout=True)\r\n\r\n # If a location is specified, generate a plot of the temperature at that location over time next to the map\r\n if not location_name is None:\r\n # Get location\r\n location = geolocator.geocode(location_name)\r\n print(\"Using the location: {}\".format(location.address))\r\n # Get coordinates\r\n\r\n lat = location.latitude\r\n long = location.longitude\r\n theta = (90 - location.latitude) * pi / 180\r\n phi = location.longitude * pi / 180\r\n\r\n # initialize variables\r\n T = init_temp\r\n t = 0\r\n loc_times = []\r\n loc_temps = []\r\n\r\n # iteratively update temperature at location\r\n for i in range(total_time + burn_in):\r\n loc_times.append(t)\r\n T = temp_tic(T, t, dt, phi, theta, burn_in * dt, const_albedo=const_albedo)\r\n loc_temps.append(T)\r\n t += dt\r\n\r\n # Create plot\r\n fig.set_size_inches(9, 3)\r\n gs = fig.add_gridspec(1, 6)\r\n f1_plot = fig.add_subplot(gs[0, 0:3])\r\n f1_map = fig.add_subplot(gs[0, 3:])\r\n if plot_burn_in:\r\n f1_plot.plot(loc_times, loc_temps,\r\n label=r\"$\\theta = {} \\pi$\".format(str(round(theta / pi, 2))) + \", $\\phi = {} \\pi$\".format(\r\n str(round(phi / pi, 2))))\r\n else:\r\n f1_plot.plot(loc_times[burn_in:], loc_temps[burn_in:],\r\n label=r\"$\\theta = {} \\pi$\".format(str(round(theta / pi, 2))) + \", $\\phi = {} \\pi$\".format(\r\n str(round(phi / pi, 2))))\r\n line_point, = f1_plot.plot([], [], 'ro', label=\"current temperature\")\r\n f1_plot.set_xlabel(\"Time [s]\")\r\n f1_plot.set_ylabel(\"Temperature [K]\")\r\n f1_plot.set_title(\"Temperature at the location: {}\".format(location_name))\r\n f1_plot.legend()\r\n\r\n f1_map.plot(long, lat, marker='X', markersize=12, c='white', mec='black')\r\n\r\n else:\r\n # create the appropriate size plot for only the map\r\n fig.set_size_inches(6, 3)\r\n gs = fig.add_gridspec(1, 2)\r\n f1_map = fig.add_subplot(gs[0, :])\r\n\r\n # plot earth land masses\r\n world = gpd.read_file('./Land_Mass_Shapes/World_Land.shp')\r\n world.plot(ax=f1_map, facecolor='none', ec='white')\r\n\r\n # Setting up grid\r\n x = np.arange(-180, 180, hw)\r\n y = np.arange(-90, 90, hw)\r\n X, Y = np.meshgrid(x, y)\r\n\r\n # Initializing the grid with the initial temperature\r\n Z = np.zeros((len(y), len(x))) + init_temp\r\n im = f1_map.imshow(Z, cmap=plt.cm.jet, interpolation='bilinear', extent=(-180, 180, -90, 90), 
vmin=0, vmax=300)\r\n\r\n # creating a substellar point\r\n sub_stellar_point, = f1_map.plot(0, 0, marker='*', markersize=12, c='white', mec='black')\r\n\r\n # Doing all the calculations ahead of time so the animation runs more smooth\r\n times = []\r\n map_temps = []\r\n map_means = []\r\n map_ranges = []\r\n sub_stel_pos = []\r\n months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\r\n for frame in range(total_time + burn_in):\r\n # updating time\r\n t = frame * dt\r\n times.append(t)\r\n # updating the temperatures\r\n Z = temp_tic(Z, t, dt, (X + hw / 2) * pi / 180, (Y + hw / 2 + 90) * pi / 180, burn_in * dt, const_albedo)\r\n map_temps.append(Z)\r\n # mean temp on earth\r\n map_means.append(round(np.mean(Z), 1))\r\n # range of values for the color bar\r\n map_ranges.append((np.min(Z), np.max(Z) + abs((300 - np.max(Z)) / 10)))\r\n # updating the location of the substellar point\r\n sub_stel_pos.append((((w_rot * (-t)) * 180 / pi) - int(((w_rot * (-t)) * 180 / pi - 180) / 360) * 360,\r\n (Theta * np.cos(w_orb * (-t))) * 180 / pi))\r\n\r\n # create animated plot\r\n def my_animate(frame):\r\n if not dest is None:\r\n print(\"The video is {}% done saving.\".format(round((frame) / (total_time) * 100, 1)))\r\n\r\n # start animation after burn in\r\n t = times[frame]\r\n frame = frame + burn_in\r\n\r\n # update all items in the image\r\n im.set_array(map_temps[frame])\r\n im.set_clim(map_ranges[frame][0], map_ranges[frame][1])\r\n sub_stellar_point.set_data(sub_stel_pos[frame][0], sub_stel_pos[frame][1])\r\n\r\n # update figure title (171 day offset in time is for northern summer solstice)\r\n f1_map.set_title('Month: ' + months[int((frame*dt + 3600*24*171) // (365 / 12 * 24 * 3600))%12] +\r\n ' - Total time: {} days'.format(t // (24 * 3600)) +\r\n ' \\n Avegare termperature: {}K '.format(str(map_means[frame])))\r\n\r\n # if a location is specified update the current temperature on the plot\r\n if not location_name == None:\r\n xs = loc_times[frame:frame + 1]\r\n ys = loc_temps[frame:frame + 1]\r\n line_point.set_data(xs, ys)\r\n f1_plot.set_title(\"Temperature at the location: {} \\nCurrent temperature: {}K \".format(location_name,\r\n round(loc_temps[frame])))\r\n\r\n # create animation object\r\n anim = animation.FuncAnimation(fig, my_animate,\r\n frames=total_time,\r\n interval=1 / fps * 1000)\r\n\r\n # aesthetics such as color bars and axes\r\n plt.colorbar(im, label=\"Temperature [K]\")\r\n f1_map.set_xlabel(\"Longitude\")\r\n f1_map.set_ylabel(\"Latitude\")\r\n f1_map.xaxis.set_ticks(np.linspace(-180, 180, 5))\r\n f1_map.yaxis.set_ticks(np.linspace(-90, 90, 5))\r\n\r\n # save animation if destination is specified\r\n if dest is not None:\r\n Writer = animation.writers['ffmpeg']\r\n writer = Writer(fps=fps, metadata=dict(artist='Me'), bitrate=1800)\r\n anim.save(dest)\r\n\r\n dest = None\r\n plt.show()", "def plotDFT(x):\n \n X = DFTdirect(x)\n plt.plot([c.re for c in x], [c.im for c in x], 'ro')\n plt.plot([c.re for c in X], [c.im for c in X], 'bo')\n plt.show()", "def plot_data(heart_filt, pace_filt):\n\n plt.figure(1)\n plt.plot(heart_filt, pace_filt)\n plt.show()", "def plot_energies(self):\n plt.plot(self.energies[0], self.energies[1])\n plt.xlabel('Time (s)')\n plt.ylabel('Energy (J)')\n plt.show()", "def plot_observed(self):\n \n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n for k in self.observed_data.keys():\n plt.plot(self.observed_data[k][0], 
self.observed_data[k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n\n fig = plt.figure(figsize=(16,4))\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.observed_data.keys(): \n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.observed_data.keys():\n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def plot_single_trajectory(self):\n\n plt.plot(self.trip_centroids['lon'], self.trip_centroids['lat'], '-o')", "def plotTrajectory(xydata, colordata = 'time', cmap = cm.jet, size = 20, ax = None, line = True):\n \n if isinstance(colordata, basestring) and colordata in ['time']:\n c = np.linspace(0, len(xydata[:,0]), len(xydata[:,0]));\n else:\n c = colordata;\n \n if ax is None:\n ax = plt.gca();\n s = ax.scatter(xydata[:,0], xydata[:,1], c = c, cmap = cmap, s = size, marker = 'o', lw = 0);\n if isinstance(line, basestring):\n ax.plot(xydata[:,0], xydata[:,1], color = line); \n \n ax.get_figure().colorbar(s, ax = ax);\n return ax", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def plotOfOneChannel(self, i=0):\n\t\tp1=_plot.plot(xLabel='time [ms]',yLabel=r'a.u.',title=self.title,\n\t\t\t\t\t shotno=[self.shotno],subtitle=self.sensorNames[i]);\n\t\t\n\t\t# smoothed data\n\t\tp1.addTrace(yData=self.data[i],xData=self.time*1000,\n\t\t\t\t\tyLegendLabel=self.sensorNames[i]) \n\t\t\t\n\t\treturn p1", "def plot_data(data):\n\n print(\"Plotting.\")\n\n # Changing dates to floating point, also moves Date out of index\n df_ohlc = data.reset_index()\n df_ohlc['index'] = df_ohlc['index'].map(mdates.date2num)\n df_ohlc.rename(columns={'index': 'Date'}, inplace=True)\n\n # Rearrange columns for ohlc\n columns = ['Date', 'Open', 'High', 'Low', 'Close']\n df_ohlc = df_ohlc[columns]\n\n fig = plt.figure()\n ax1 = plt.subplot()\n ax1.xaxis_date()\n\n candlestick_ohlc(ax1, df_ohlc.values, width=.5, colorup='g', colordown='r')\n\n plt.ylabel(\"Price\")\n plt.xlabel(\"Date\")", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def plot_1():\n p_files = []\n filename = 
\"energy_data_2D_80\"\n for file in sorted(os.listdir(folder)):\n if file.startswith(filename):\n p_files.append(os.path.join(folder,file))\n T_list = []\n fig, ax = plt.subplots()\n for p_file in p_files[3::3]:\n T = (os.path.splitext(os.path.basename(p_file))[0]).split('_',4)[4]\n #print(T)\n E = []\n t = []\n if (T not in T_list):\n T_list.append(T)\n with open(p_file) as csvfile:\n lines = csv.reader(csvfile, delimiter=' ')\n sweep = 0\n for row in lines:\n E.append(float(row[0]))\n t.append(sweep)\n sweep += 1\n ax.plot(t[0:200], E[0:200],label=\"T = \"+format(T[0:3]))\n ax.set_title(\"Energy per bond vs Time\")\n ax.set_ylabel(\"e / J\")\n ax.set_xlabel(\"t / sweeps\")\n ax.legend()\n\n fig.savefig(folder2+\"energy_vs_time.png\")\n fig.savefig(texfolder+\"energy_vs_time.pdf\")", "def __plot(name, x, y):\n import matplotlib.pyplot as plt\n\n plt.plot(x, y)\n plt.xlabel('elements')\n plt.ylabel('time (seconds)')\n plt.savefig(\"{}\".format(name))", "def plot_v(t, v):\n p1 = plt.plot(t,v)\n plt.xlabel('Time [s]')\n plt.ylabel('Velocity [m/s]')\n plt.title('Velocity for the skydiver as a function of time')\n plt.show()\n plt.savefig('Parachute_velocity.png')", "def visualize_tma_time_series(data_path):\n\n X, y = load_tma_data(data_path)\n\n fig = plt.figure()\n ax = fig.add_subplot('111')\n\n for i in range(X.shape[0]):\n C = X[i, ...].reshape(X.shape[1], X.shape[2])\n l = y[i]\n ax.imshow(C, vmin=0, vmax=1)\n ax.set_title('Label : %i' % l)\n plt.pause(0.1)\n\n # labels = np.unique(y)\n # fig, axes = plt.subplots(figsize=(13, 4), ncols=4)\n # for i, l in enumerate(labels, start=0):\n # idx = np.where(y == l)[0]\n # temp = np.mean(X[idx, ...], axis=0)\n # temp[:8, :] = temp[:8, :]*6\n # pos = axes[i].imshow(temp, vmin=0, vmax=1)\n # axes[i].set_title(\"Label : %i\" % l)\n # fig.colorbar(pos, ax=axes[i])\n # plt.show()", "def plot_pixel_timeseries(self, folder_name, indices):\n # TODO, swap x and y axes in the parameters\n # single pixel to plot\n # indexed in the style of python: [row, column] = [y, x]\n (y_index, x_index) = indices\n if type(x_index) == int:\n print('Plotting ' + str(x_index) + ' , ' + str(y_index))\n ts = self.get_pixel_timeseries(folder_name, indices)\n indices = self.get_indices_from_filenames(folder_name)\n index_dates = dates.date2num(indices)\n fig, ax = plt.subplots(tight_layout=True)\n\n ax.plot_date(index_dates, ts, xdate=True, linestyle='solid', marker='None',\n label='Pixel [' +str(x_index) + ', ' + str(y_index)+']')\n ax.legend()\n ax.grid(b=True, which='major', color='#666666', linestyle='-')\n\n # Show the minor grid lines with very faint and almost transparent grey lines\n ax.minorticks_on()\n #ax.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n ax.tick_params(axis='x', labelrotation=30.0)\n ax.set_ylabel('Counts')\n ax.set_xlabel('Time')\n\n fig.set_figwidth(10)\n fig.savefig(self.parent_folder + 'analysis/timeseries__' + str(x_index) + '_' + str(y_index) + '_.png')\n fig.savefig(self.parent_folder + 'analysis/timeseries__' + str(x_index) + '_' + str(y_index) + '_.svg')\n fig.savefig(self.parent_folder + 'analysis/timeseries__' + str(x_index) + '_' + str(y_index) + '_.eps')\n #fig.savefig(self.parent_folder + 'analysis/timeseries_TEST2_' + str(x_index) + '_' + str(y_index) + '.svg')\n fig.clf()\n\n # multiple pixels to plot\n else:\n fig, ax = plt.subplots()\n for i in range(0, len(x_index)):\n print('Plotting ' + str(x_index[i]) + ' , ' + str(y_index[i]))\n ts = self.get_pixel_timeseries(folder_name, (x_index[i], y_index[i]))\n 
indices = self.get_indices_from_filenames(folder_name)\n index_dates = dates.date2num(indices)\n\n ax.plot_date(index_dates, ts, xdate=True, linestyle='solid', marker='None',\n label=str(x_index[i]) + ' , ' + str(y_index[i]))\n\n ax.legend()\n ax.grid(b=True, which='major', color='#666666', linestyle='-')\n\n # Show the minor grid lines with very faint and almost transparent grey lines\n ax.minorticks_on()\n ax.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n ax.tick_params(axis='x', labelrotation=30.0)\n #fig.set_figwidth(40)\n fig.savefig(\n self.parent_folder + 'analysis/timeseries_TEST_' + str(x_index) + '_' + str(y_index) + '.png')\n fig.savefig(\n self.parent_folder + 'analysis/timeseries_TEST_' + str(x_index) + '_' + str(y_index) + '.svg')\n fig.clf()", "def plot_time_frames(self):\n\n fig = plt.figure()\n plt.grid(True)\n\n plt.ylim([-1.5,1.5])\n plt.xlim([0,1])\n\n for key in self.timeframes.keys():\n if key == 0:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)), linewidth=5)\n else:\n plt.plot(self.x, self.timeframes[key], label=\"time: \" + str(round(key*self.dt, 3)))\n\n plt.title(\"Wave at different times\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n # fig.savefig('results/pics_wave/vibrating_string_'+ self.type + '.png', dpi=150)", "def plot_basic(time, data, lgnd=None):\n pylab.figure()\n pylab.plot(time, data)\n pylab.xlabel('time, s')\n pylab.ylabel('data')\n pylab.title('Basic Plotter')\n if lgnd != None:\n pylab.legend(lgnd)\n pylab.grid(True)\n pylab.show()", "def plotSingleTimeseries(data):\r\n \r\n print '...creating plot'\r\n fig = plt.figure(figsize=(11,8.5))\r\n ax = fig.add_subplot(111)\r\n for header in HEADER_NAMES[1:]:\r\n ax.plot(data[HEADER_NAMES[0]],data[header],label=header)\r\n #i, h = ax.get_legend_handles_labels()\r\n \r\n fig.autofmt_xdate()\r\n ax.set_title(PLOT_TITLE)\r\n ax.set_xlabel(X_AXIS_TITLE)\r\n ax.set_ylabel(Y_AXIS_TITLE)\r\n ax.grid(True)\r\n ax.xaxis.set_major_formatter(md.DateFormatter('%m-%d-%Y %H:%M'))\r\n #print i,h\r\n ax.legend()\r\n plt.show()\r\n return i,h", "def coffee_plot(times, Temps, axes, legends=[None, None], split=2):\n plot_model(times[split:], Temps[split:], axes[0], legends[0])\n plot_samples(times[:split], Temps[:split], axes[1], legends[1])", "def plot_training_dataset(t, x_train):\n\n # Setup the figure.\n fig, axes = plt.subplots(\n 1, 2, sharex=True, figsize=(fig_width, fig_width / 6)\n )\n\n # Plot the oscillators' positions.\n axes[0].plot(t, x_train[:, : dsys.inputs], alpha=0.5)\n\n # Add decorators.\n axes[0].set_ylabel(r\"$q_i[k]$\")\n\n # Plot the oscillators'velocities.\n axes[1].plot(t, x_train[:, dsys.inputs :], alpha=0.5)\n\n # Add decorators.\n axes[1].set(xlim=(t.min(), t.max()), xlabel=r\"k\", ylabel=r\"$p_i[k]$\")\n\n return", "def _plot_ts(self, data, labels, ax,\n show_ylabels=True,\n offset=0.0,\n special_idx=[],\n errors_list=[],\n fontsize=FiguresConfig.LARGE_FONT_SIZE):\n if data.ndim == 1:\n data = data[np.newaxis, :]\n offset = int(offset)\n # apply offset setting onto the data\n data = data[:, offset:]\n\n # get shape of data to be plotted\n nsamples, ntimes = data.shape\n\n nTS = 1\n def_alpha = 1.0\n # generate ylabels for the plot\n labels = generate_region_labels(nsamples, labels)\n\n # set plotting parameters: alpha_ratio, colors, alphas\n alpha_ratio = 1.0 / nsamples\n colors = np.array(['k'] * nTS)\n alphas = np.maximum(np.array(\n [def_alpha] *\n nTS) *\n alpha_ratio,\n 1.0)\n colors[special_idx] = 'r'\n 
alphas[special_idx] = np.maximum(alpha_ratio, 0.1)\n\n # apply normalization for each trace\n for i in range(nsamples):\n data[i, :] = data[i, :] / np.nanmax(data[i, :])\n\n # plot each trace\n x = np.arange(ntimes)\n for itrace in range(nTS):\n for i in range(nsamples):\n y = data[i, :] + np.r_[i]\n ax.plot(x, y,\n color=colors[itrace],\n label=labels[itrace],\n alpha=alphas[itrace])\n\n # plot errors bars\n if errors_list:\n error = errors_list[error]\n ax.fill_between(x, y - error, y + error,\n color=colors[itrace],\n alphas=alphas[itrace])\n\n if show_ylabels:\n # print(\"Labels are : \", labels)\n y_ticks = np.arange(len(labels))\n ax.set_yticks(y_ticks)\n ax.set_yticklabels(labels, fontsize=fontsize / 1.5)\n\n for tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize / 1.5)\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(fontsize / 1.5)\n\n return ax", "def update_temperature_values(self):\n year = self._current_date.year\n month = self._current_date.month\n\n self.ensure_temperatures(dt.date(year, month, 15))\n self.set_temperature_arrays(dt.date(year, month, 15))", "def plot_ebtel(data_directory,data_file,**kwargs):\n \n #Load the data\n data = np.loadtxt(data_directory+data_file)\n #Slice the array to get the vectors we want\n time = data[:,0]\n temp = data[:,1]\n dens = data[:,2]\n temp_apex = data[:,5]\n dens_apex = data[:,6]\n heat = data[:,10]\n \n #Set up the figure\n fig,axes = plt.subplots(3,1,figsize=(12,8))\n #Set the fontsize\n fs = 16\n\n #Plot the heating\n axes[0].plot(time,heat)\n axes[0].set_ylabel(r'$h$ (erg cm$^{-3}$ s$^{-1}$)',fontsize=fs)\n axes[0].set_title(r'EBTEL-C Parameters',fontsize=fs)\n axes[0].set_xlim([time[0],time[-1]])\n axes[0].locator_params(nbins=5)\n axes[0].ticklabel_format(axis='y', style='sci', scilimits=(-2,2) )\n #Plot the temperatures\n axes[1].plot(time,temp/10**6,label=r'$T$')\n axes[1].plot(time,temp_apex/10**6,'r--',label=r'$T_a$')\n axes[1].set_ylabel(r'$T$ (MK)',fontsize=fs)\n axes[1].legend(loc=1)\n axes[1].set_xlim([time[0],time[-1]])\n axes[1].locator_params(nbins=5)\n #Plot the densities\n axes[2].plot(time,dens/10**8,label=r'$n$')\n axes[2].plot(time,dens_apex/10**8,'r--',label=r'$n_a$')\n axes[2].set_xlabel(r'$t$ (s)',fontsize=fs)\n axes[2].set_ylabel(r'$n$ ($10^{8}$ cm$^{-3}$)',fontsize=fs)\n axes[2].set_xlim([time[0],time[-1]])\n axes[2].locator_params(nbins=5)\n axes[2].legend(loc=1)\n\n #Check if output filename is specified\n if 'print_fig_filename' in kwargs:\n plt.savefig(kwargs['print_fig_filename'],format='eps',dpi=1000)\n else:\n plt.show()", "def plot_tseries_index(*args, **kwargs) :\n data = kwargs.pop('data')\n return data.dropna().plot(y=args[0], **kwargs)", "def makePlot(timeStamp):\n\n #-------------------------------------------------------------------------\n # Create figure and axes\n #-------------------------------------------------------------------------\n\n width = 12 # inches\n height = 8 # inches\n fig = plt.figure(figsize=(width, height))\n\n # We'll use gridspec to create axes in rectangular 6-by-5 lattice\n import matplotlib.gridspec as gridspec\n nrows = 6\n ncols = 5\n Grid = gridspec.GridSpec(nrows, ncols)\n\n # axis for elevation time series\n axElev = fig.add_subplot(Grid[:2, :2]) # first 2 rows, first 2 columns\n # axis for slab\n axSlab = fig.add_subplot(Grid[:2, 2:]) # first 2 rows, columns > 2\n # and the transects\n axTran1 = fig.add_subplot(Grid[2:4, :]) # rows 2,3,4, all columns\n # rows 5,6,7, all columns, share x/y axis with previous (sets same 
ticks\n # etc)\n axTran2 = fig.add_subplot(Grid[4:6, :], sharex=axTran1, sharey=axTran1)\n\n # gridspec allows to tune the spacing between plots (unit is fraction of\n # font size)\n boundary_pad = 3.5\n horizontal_pad = 0.2\n vertical_pad = 1.0\n # figure area left,bottom,right,top in normalized coordinates [0,1]\n bounds = [0, 0, 1, 1]\n Grid.tight_layout(\n fig,\n pad=boundary_pad,\n w_pad=horizontal_pad,\n h_pad=vertical_pad,\n rect=bounds)\n\n #-------------------------------------------------------------------------\n # Create plots\n #-------------------------------------------------------------------------\n\n # for all avaiable colormaps see ( '_r' reverses the colormap )\n # http://matplotlib.org/examples/color/colormaps_reference.html\n colormap = plt.get_cmap('Spectral_r')\n colormap_kine = plt.get_cmap('gist_heat')\n\n # slab\n salt_clim = [0, 32]\n ncontours = 16\n # bouding box for slab [xmin,xmax,ymin,ymax] in model x,y coordinates\n estuarybbox = [330000, 360000, 284500, 297500]\n dia = slabSnapshotDC(\n clabel='Salinity',\n unit='psu',\n clim=salt_clim,\n cmap=colormap)\n dia.setAxes(axSlab)\n dia.addSample(slabDC, timeStamp=timeStamp, plotType='contourf',\n bbox=estuarybbox, N=ncontours)\n # overrides default format for colorbar floats\n dia.showColorBar(format='%.2g')\n #dia.addTitle('in case you want a custom title')\n # get transect (x,y) coordinates from the transectDC\n transectXYCoords = generateTransectFromDataContainer(transectDC_salt, 0)[4]\n # plot transect on the map (thin black on thick white)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='w', linewidth=2.0)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='k', linewidth=1.0)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(\n staX,\n staY,\n label=station,\n printLabel=True,\n marker='*')\n # add text to plot. 
x,y are in normalized axis coordinates [0,1]\n dia.ax.text(0.05, 0.98, 'custom text', fontsize=fontsize,\n verticalalignment='top', horizontalalignment='left',\n transform=dia.ax.transAxes)\n\n # elevation time series\n # define the time range to plot\n elevStartTime = datetime.datetime(2012, 5, 4, 0, 0)\n elevEndTime = datetime.datetime(2012, 5, 5, 0, 15)\n elevMeanTime = elevStartTime + (elevEndTime - elevStartTime) / 2\n elevLim = [-1.5, 2.5]\n dia = timeSeriesPlotDC2(\n xlabel=elevMeanTime.strftime('%Y %b %d'),\n ylim=elevLim)\n dia.setAxes(axElev)\n #dia.addShadedRange( timeStamp, timeStamp+datetime.timedelta(seconds=30), facecolor='IndianRed')\n dia.addShadedRange(\n timeStamp,\n timeStamp,\n edgecolor='IndianRed',\n facecolor='none',\n linewidth=2)\n tag = elevDC.getMetaData('tag')\n dia.addSample(\n elevDC.timeWindow(\n elevStartTime,\n elevEndTime),\n label=tag,\n color='k')\n dia.addTitle('Elevation ({0:s}) [m]'.format(\n elevDC.getMetaData('location').upper()))\n # adjust the number of ticks in x/y axis\n dia.updateXAxis(maxticks=5)\n dia.updateYAxis(maxticks=3, prune='lower')\n\n # transects\n dia = transectSnapshotDC(\n clabel='Salinity',\n unit='psu',\n cmap=colormap,\n clim=salt_clim)\n dia.setAxes(axTran1)\n #transectDC_salt.data *= 1e-3\n dia.addSample(transectDC_salt, timeStamp, N=ncontours)\n dia.addTitle('')\n dia.showColorBar()\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n # do not show x axis ticks and label for this plot\n dia.hideXTicks()\n\n dia = transectSnapshotDC(clabel='TKE', unit='m2s-1', logScale=True,\n clim=[-7, -2], climIsLog=True, cmap=colormap_kine)\n dia.setAxes(axTran2)\n dia.addSample(transectDC_kine, timeStamp, N=ncontours)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n dia.addTitle('')\n dia.showColorBar()\n dia.updateXAxis(maxticks=15)\n dia.updateYAxis(maxticks=6)\n\n #-------------------------------------------------------------------------\n # Save to disk\n #-------------------------------------------------------------------------\n dateStr = timeStamp.strftime('%Y-%m-%d_%H-%M')\n filename = '_'.join([imgPrefix, dateStr])\n saveFigure(\n imgDir,\n filename,\n imgFiletype,\n verbose=True,\n dpi=200,\n bbox_tight=True)\n plt.close()", "def plot_time_series(values, times=None, axes=None, indices=None, title=None):\n # Do a shallow copy to allow us to transform list into nparray.\n y_values = np.copy(values)\n\n # If no axes were given, create a new figure.\n if axes is None:\n fig = plt.figure()\n axes = fig.add_axes([0, 0, 1, 1])\n\n # If no valid indices are given, use them all.\n all_indices = np.linspace(0, len(values) - 1, len(values), dtype=int)\n if indices is None:\n to_use = all_indices\n else:\n to_use = np.copy(indices)\n invalid_indices = np.setdiff1d(all_indices, to_use)\n\n # If the times are not given, then use linear spacing.\n if times is None:\n x_values = all_indices\n else:\n x_values = np.copy(times)\n\n # Plot the data with the curve in blue, the valid points as blue dots,\n # and the invalid indices as smaller red dots.\n axes.plot(x_values, y_values, \"b\")\n axes.plot(x_values[to_use], y_values[to_use], \"b.\", ms=25)\n axes.plot(x_values[invalid_indices], 
y_values[invalid_indices], \"r.\", ms=10)\n\n if title is not None:\n axes.set_title(title)", "def plot_dt_signal(x, title=None):\n pylab.figure()\n pylab.stem(range(len(x)), x)\n pylab.title(title)\n pylab.xlabel(\"samples\")", "def plot_timeseries(self, series):\n plt.plot(range(1, len(series) + 1), series)\n plt.title(self.ticker)\n plt.savefig('plots/ARIMA/{0}.pdf'.format(self.ticker))\n plt.show()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def visualize_time_series(fig_ax, data, inp_color, missing_data, lag_color, first_date,\n x_label=\"Number of Days\", y_label=\"Log of Aluminium Price\", title=\"Prices over time\"):\n fig, ax = fig_ax\n ((x_train_raw, y_train_raw), y_pred_list) = data\n\n missing_x, missing_y = missing_data\n is_missing = len(missing_x) != 0\n\n first_date = datetime.strptime(first_date, '%Y-%m-%d')\n\n convert_date = lambda x: [\n np.datetime64((first_date + timedelta(days=d)).strftime('%Y-%m-%d'))\n for d in x\n ]\n convert_price = lambda x: x[\"Output\"].to_list()\n\n x_train = convert_date(x_train_raw[\"Date\"].to_list())\n y_train = convert_price(y_train_raw)\n \n cut_point = x_train[-1]\n ax.plot(x_train, y_train, color=color[inp_color])\n\n for i, y_pred in enumerate(y_pred_list):\n data, plot_name, color_code, is_bridge = y_pred\n mean_pred, x_test_raw = data[\"mean\"], data[\"x\"]\n x_test = convert_date(x_test_raw)\n\n if i == 0 and is_missing:\n missing_x = convert_date(missing_x)\n ax.axvline(x_test[0], color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n ax.plot([missing_x[-1], x_test[0]], [missing_y[-1], mean_pred[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvspan(cut_point, x_test[0], color=color[lag_color], alpha=0.1)\n\n plot_bound(ax, data, x_test, color[color_code], plot_name)\n\n if is_bridge and (not is_missing): \n ax.plot([x_train[-1], x_test[0]], [y_train[-1], mean_pred[0]], color[color_code], linewidth=1.5)\n\n if is_missing:\n ax.plot(missing_x, missing_y, color=color[lag_color], linestyle=\"dashed\")\n ax.plot([x_train[-1], missing_x[0]], [y_train[-1], missing_y[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvline(cut_point, color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n else:\n ax.axvline(cut_point, color=color[\"k\"], linestyle='--')\n\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.legend()\n\n # ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n\n # ax.set_xlim(left=cut_point-np.timedelta64(1, 'm'))\n plot_axis_date(ax, x_train + missing_x + x_test)\n ax.grid()\n return fig, ax", "def plotTimeDelta(data, type_plot, device):\n mean = data.mean()\n std = data.std()\n max_data = data.max()\n min_data = data.min()\n max_indx = np.argmax(data) # max value index\n min_indx = np.argmin(data) # min value index\n x = np.arange(min_data, max_data, 0.1)\n y = normfun(x, mean, std)\n res_quantile = quantileValues(data, device)\n if type_plot == 0:\n plt.plot(x, y, color='blue')\n annot_max_min(x, y)\n # plt.hist(data.dropna(), bins=500, rwidth=0.9, normed=True)\n plt.title('Time Delta distribution')\n plt.xlabel('Time Delta')\n plt.ylabel('Probability')\n sns.distplot(tmp.deltaSeconds.dropna(),\n kde=True, rug=True, rug_kws={\"color\": \"k\"},\n kde_kws={\"color\": \"red\", \"lw\": 3, \"label\": \"KDE\"},\n hist_kws={\"histtype\": \"step\", \"lw\": 3, \"alpha\": 1,\n \"color\": \"g\"},\n bins=500)\n # ax.set(xlabel='Vibration Intensity', ylabel='Probability')\n elif type_plot == 1: # plot the max and min point\n 
plt.plot(data)\n plt.plot(max_indx, data[max_indx], 'ks')\n show_max = '['+str(max_indx)+' '+str(data[max_indx])+']'\n plt.annotate(show_max,\n xytext=(max_indx, data[max_indx]),\n xy=(max_indx, data[max_indx]))\n plt.plot(min_indx, data[min_indx], 'gs')\n show_min = '['+str(min_indx)+' '+str(data[min_indx])+']'\n plt.annotate(show_min,\n xytext=(min_indx, data[min_indx]),\n xy=(min_indx, data[min_indx]))\n plt.title('Time Delta')\n plt.xlabel('Index')\n plt.ylabel('Vibration Intensity Value')\n elif type_plot == 2: # boxplot\n boxplot(data.dropna())\n return res_quantile", "def plot(self, *args, **kwargs):\n pass", "def plot_stress_time(F_tot, response_t, coords, t_range):\n section = np.where((t>t_range[0]) & (t<t_range[1]))[0]\n fig, ax1 = plt.subplots(figsize=[15,5])\n ax2 = ax1.twinx()\n# ax1.set_title('Load and response at '+str(coords),fontsize = 14)\n ax1.set_xlim(t_range)\n ax1.set_xlabel('t [s]')\n resp = ax1.plot(t[section], response_t[section]/10**6, color=\"#00A6D6\",\n label='Equivalent gate stress')\n ax1.set_ylabel('Stress [MPa]', fontsize=12)\n d_max = 1.2*max(response_t[section])/10**6\n d_mean = np.mean(response_t[section])/10**6\n ax1.set_ylim(d_mean-d_max,d_max)\n ax1.legend()\n\n force = ax2.plot(t[section], F_tot[section]/1000, color=\"#c3312f\", label = 'Wave force')\n ax2.set_ylabel('Integrated wave force [$kN/m$]', fontsize = 12)\n F_lim = 1.2*max(F_tot[section])/1000\n F_mean = np.mean(F_tot[section]/1000)\n ax2.set_ylim(F_mean-F_lim,F_lim)\n\n lines = resp + force\n labs = [l.get_label() for l in lines]\n ax1.grid(lw=0.25)\n ax1.legend(lines,labs, fontsize = 12)\n return fig", "def plot_ts(da, key):\n p = sns.lineplot(data=da.to_pandas(), linewidth=2)\n p.set_xlabel('time')\n p.set_ylabel(key)", "def plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf):\n fig = pl.figure(figsize=(8,6))\n ax = fig.add_subplot(111)\n pl.setp(ax.spines['bottom'], color='#D2B9D3', lw=3, zorder=-2)\n pl.setp(ax.spines['left'], color='#D2B9D3', lw=3, zorder=-2)\n for dire in ['top', 'right']:\n ax.spines[dire].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n max_fts = max(f_ts)\n rr_ts = [aa/max_fts for aa in f_ts[::-1]]\n f_ts = [aa/max_fts for aa in f_ts]\n r_ts = [aa/max_fts for aa in r_ts]\n\n line0 = pl.fill_between([r_ts[0], f_ts[-1]], R_df[0]-R_ddf[0], R_df[0]+R_ddf[0], color='#D2B9D3', zorder=-5)\n for i in range(len(f_ts)):\n line1 = pl.plot([f_ts[i]]*2, [F_df[i]-F_ddf[i], F_df[i]+F_ddf[i]], color='#736AFF', ls='-', lw=3, solid_capstyle='round', zorder=1)\n line11 = pl.plot(f_ts, F_df, color='#736AFF', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#736AFF', ms=12, zorder=2)\n\n for i in range(len(rr_ts)):\n line2 = pl.plot([rr_ts[i]]*2, [R_df[i]-R_ddf[i], R_df[i]+R_ddf[i]], color='#C11B17', ls='-', lw=3, solid_capstyle='round', zorder=3)\n line22 = pl.plot(rr_ts, R_df, color='#C11B17', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#C11B17', ms=12, zorder=4)\n\n pl.xlim(r_ts[0], f_ts[-1])\n\n pl.xticks(r_ts[::2] + f_ts[-1:], fontsize=10)\n pl.yticks(fontsize=10)\n\n leg = pl.legend((line1[0], line2[0]), (r'$Forward$', r'$Reverse$'), loc=1, prop=FP(size=18), frameon=False)\n pl.xlabel(r'$\\mathrm{Fraction\\/of\\/the\\/simulation\\/step}$', fontsize=16, color='#151B54')\n pl.ylabel(r'$\\mathrm{\\Delta G\\/%s}$' % P.units, fontsize=16, color='#151B54')\n pl.xticks(f_ts, ['%.2f' % i for i in f_ts])\n pl.tick_params(axis='x', color='#D2B9D3')\n pl.tick_params(axis='y', color='#D2B9D3')\n 
pl.savefig(os.path.join(P.output_directory, 'dF_t.pdf'))\n pl.close(fig)\n return", "def plot(self, show=True, save=True):\n x = numpy.vstack([therm.X for therm in self._thermals])\n plt.scatter(x[:,1] / 1000.0, x[:,0] / 1000.0, s=5, edgecolors='none')\n if save:\n f = plt.gcf()\n f.savefig('thermal_field.png', format='png', dpi=1000)\n if show:\n plt.show()", "def plot_fitter(self):\n\n total_time=self.interval*self.maxspectra\n times = np.linspace(self.interval,total_time + 1,self.interval)\n spectra_fitter.main(self.rt_plot.sum_data, times)", "def plot_table(timestamps: dict, threadList: list, mList: list) -> None:\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()", "def old():\n therm = [[300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.], [300.]]\n ts = np.linspace(0, 50, 1000)\n\n #odvod temperature bo vsota gradientov (diferencialov) z desne in z leve glede x\n #dT/dt[i] = K/x^2 * (temperature[i-1]- 2*temperature[i] + temperature[i+1])\n #razen ce je robna tocka\n #potem je treba nekaj scarat - robna bo funkcija\n def odvod(indeks, arr, K, time):\n odvodt = K * (arr[indeks-1][time] - 2*arr[indeks][time] + arr[indeks+1][time])\n return odvodt\n\n def robna(time):\n return 5*m.cos(0.05*time)\n\n\n K = 0.02\n x = 0.003\n\n def main_old():\n t = 0\n dt = 50. / 1000.\n for time in ts:\n for i in range(0,9):\n therm[i].append(therm[i][t] + (robna(time) if i==0 else odvod(i, therm, K, t)*dt/(x**2)))\n therm[9].append(300.)\n t+=1\n\n import matplotlib.pyplot as plt\n\n plt.plot(ts[:], therm[4][:-1], label = 'T(t)')\n plt.show()\n \n main_old()", "def plot(self, data_frame):\n self.axes.plot(data_frame, 'o-')\n self.axes.set_ylim(0.0, 200.0)\n self.fig.autofmt_xdate()\n self.draw()", "def coordinate_vs_time_plotter(array, xyz_axis=0, bird=0, axis_of_time_steps=2, start=0., end=1.):\r\n y_values = array[bird, xyz_axis, :]\r\n x_values = get_time_array(array, axis_of_time_steps, start, end)\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot()\r\n\r\n if xyz_axis == 0:\r\n ax.set_ylabel('X (m)')\r\n elif xyz_axis == 1:\r\n ax.set_ylabel('Y (m)')\r\n elif xyz_axis == 2:\r\n ax.set_ylabel('Z (m)')\r\n else:\r\n print(\"That is not a valid axis choice. 
Please choose one of: 0, 1, 2\")\r\n ax.set_xlabel('Time (s)')\r\n ax.scatter(x_values, y_values)\r\n return fig.show()", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale 
units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()", "def plot_xyz():\n plt.subplot(3,1,1) # for x axis\n plt.title('x value v.s. time')\n plt.grid(True)\n plt.ylabel('X')\n plt.xlabel('t')\n plt.plot(x, '-r')\n\n plt.subplot(3,1,2) # for y axis\n plt.title('y value v.s. time')\n plt.grid(True)\n plt.ylabel('Y')\n plt.xlabel('t')\n plt.plot(y, '-g')\n\n plt.subplot(3,1,3) # for z axis\n plt.title('z value v.s. 
time')\n plt.grid(True)\n plt.ylabel('Z')\n plt.xlabel('t')\n plt.plot(z, '-b')", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def liveplot(x, y, xlim, ylim, title):\n plt.plot(x,y,'b.')\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.xlabel('North-South Axis')\n plt.ylabel('East-West Axis')\n plt.title(title)\n plt.show()", "def raw_time_series():\n symbol, company, stock_df = get_csv_data()\n price = input(\"\"\"\n Please pick your pricing measure (Open, High, Low, Close)\n Please enter a valid price: \"\"\") \n #Close = stock_df.Close\n #Date = stock_df.Date\n \n # print(type(stock_df))\n # print(stock_df)\n \n #plots the user specified price measure against date\n plot = stock_df.plot(x='Date', y=price)\n\n #renaming the details of the graph\n plot.set_xlabel(\"Date\")\n plot.set_ylabel('Price at ' + price)\n plot.set_title('Raw time series for ' + company)\n plot.ticklabel_format(axis = 'y', style = 'plain')\n plt.show()\n\n #code for following graphs was adapted from https://towardsdatascience.com/an-end-to-end-project-on-time-series-analysis-and-forecasting-with-python-4835e6bf050b\n matplotlib.rcParams['axes.labelsize'] = 14\n matplotlib.rcParams['xtick.labelsize'] = 12\n matplotlib.rcParams['ytick.labelsize'] = 12\n matplotlib.rcParams['text.color'] = 'k'\n \n #Converting Date to datetime so that it works for the operations below\n stock_df['Date'] = pd.to_datetime(stock_df.Date)\n #stock_df.index = stock_df['Date'] \n \n #Removes the other columns from the dataframe\n data = stock_df.filter(['Date',price])\n\n #setting the data as the index so that it can be plotted against 'price'\n data = data.set_index('Date')\n \n #returns the monthly average 'price'\n y = data[price].resample('MS').mean()\n \n #renaming details of the graph\n plot2 = y.plot(figsize=(15, 6))\n plot2.set_xlabel(\"Date\")\n plot2.set_ylabel('Price at ' + price)\n plot2.set_title('Average monthly ' + price + ' price for ' + company)\n plt.show()\n \n #decomposition graphs from the link above\n rcParams['figure.figsize'] = 18, 8\n decomposition = sm.tsa.seasonal_decompose(y, model='additive')\n fig = decomposition.plot()\n #fig.set_title('Time series decompisition for ' + company)\n plt.show()", "def trajectory1(self):\r\n\r\n trackt = [] # particle trajectory,\r\n trackx = [] # particle trajectory\r\n an = [] # analitical s**2 + x**2 = t**2\r\n s1 = [] # s = 10; s = 0, light\r\n s2 = [] # s = 20;\r\n s3 = [] # s = 40;\r\n 
for i in range(0, len(self.dt.obs.obt_g)):\r\n trackt.append(float(i))\r\n trackx.append(self.dt.x[i])\r\n an.append(math.sqrt(float(i) ** 2 + self.dt.x[i] ** 2))\r\n s1.append(math.sqrt(1.0 ** 2 + self.dt.x[i] ** 2))\r\n s2.append(math.sqrt(2.0 ** 2 + self.dt.x[i] ** 2))\r\n s3.append(math.sqrt(4.0 ** 2 + self.dt.x[i] ** 2))\r\n\r\n # plots:\r\n\r\n (fig, ax) = plt.subplots() # figsize=(7,5)\r\n\r\n # trajectory\r\n\r\n ax.plot(\r\n trackx,\r\n trackt,\r\n marker='+',\r\n linewidth=1,\r\n linestyle='-',\r\n color='green',\r\n label='treck',\r\n )\r\n\r\n # measurement t\r\n # ax.plot(self.dt.x, self.dt.t, marker=\"+\", linestyle=\" \", color=\"blue\", label=\"result of measurement\")\r\n\r\n ax.plot(\r\n self.dt.x,\r\n self.dt.t,\r\n marker='o',\r\n linestyle=' ',\r\n color='black',\r\n label='result of measurement',\r\n )\r\n\r\n # analitical t\r\n\r\n ax.plot(self.dt.x, an, linestyle='-', color='red',\r\n label='continuum')\r\n\r\n # light trajectory\r\n\r\n ax.plot(trackx, trackx, linestyle='-', color='yellow',\r\n label='s=0 (light)')\r\n\r\n # s(x) curves\r\n\r\n ax.plot(\r\n trackx,\r\n s1,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=1.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s2,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=2.0',\r\n )\r\n ax.plot(\r\n trackx,\r\n s3,\r\n linestyle=':',\r\n linewidth=1,\r\n color='k',\r\n label='s=4.0',\r\n )\r\n\r\n # error of measurement t\r\n\r\n ax.errorbar(self.dt.x, self.dt.t, fmt='k ', yerr=self.dt.t_err)\r\n\r\n # signature on the horizontal x-axis\r\n\r\n ax.set_xlabel('x in metres')\r\n xm = -1.0\r\n for i in range(len(self.dt.x)):\r\n if self.dt.x[i] > xm:\r\n xm = self.dt.x[i]\r\n stepx = round(xm / float(len(self.dt.x)), 1)\r\n xm = round(xm + stepx, 1)\r\n ax.set_xlim([0.0, xm])\r\n\r\n # signature on vertical y axis\r\n\r\n ax.set_ylabel('t in metres of light time ')\r\n ym = -1.0\r\n for i in range(len(self.dt.t)):\r\n if self.dt.t[i] > ym:\r\n ym = self.dt.t[i]\r\n stepy = round(ym / float(len(self.dt.t)), 1)\r\n ym = round(ym + stepy, 1)\r\n ax.set_ylim([0.0, ym])\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on x)\r\n\r\n locatorx = matplotlib.ticker.MultipleLocator(base=stepx)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.xaxis.set_major_locator(locatorx)\r\n\r\n # Create an instance of the class that will be responsible for the location of the labels (base is step on y)\r\n\r\n locatory = matplotlib.ticker.MultipleLocator(base=stepy)\r\n\r\n # Set the locator for the main labels\r\n\r\n ax.yaxis.set_major_locator(locatory)\r\n\r\n ax.grid()\r\n\r\n # show legend\r\n\r\n ax.legend(loc='upper left')\r\n\r\n # show drawing\r\n\r\n plt.show()", "def plot_acc(acc_watch, x_acc_df):\n\tfig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\tplt.xlabel('Time (ms)')\n\tplt.ylabel('acc. 
value')\n\tax1.set_title('Acceleration Data from ECG')\n\tax2.set_title('Acceleration Data from Watch')\n\n\t# ecg data\n\tax1.plot(x_acc_df['timestamp'], x_acc_df['x_acc'] )\n\t# ppg data\n\tax2.plot(acc_watch['timestamp'], acc_watch['v0'])\n\n\tplt.show()", "def plot_data(self, filepath=None, time_min=None, time_max=None, title=None,\n electrode=None):\n\n # normalizes the samples x electrodes array containing the EEG data and\n # adds 1 to each row so that the y-axis value corresponds to electrode\n # location in the MNI coordinate (x,y,z) by electrode df containing\n # electrode locations\n\n if self.get_data().shape[0] == 1:\n nii = self.to_nii()\n nii.plot_glass_brain(pdfpath=filepath)\n elif self.get_data().empty:\n fig = plt.figure()\n ax = fig.add_subplot(111, aspect='equal')\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()\n else:\n Y = _normalize_Y(self.data) # self.get_data()) this allows us to plot all the electrodes even the recon ones\n\n if electrode is not None:\n Y = Y.loc[:, electrode]\n if len(Y.shape) > 1:\n for i, column in enumerate(Y):\n Y[column] = Y[column] - int(column) + i\n\n # divide index by sample rate so that index corresponds to time\n if self.sample_rate:\n Y.index = np.divide(Y.index,np.mean(self.sample_rate))\n\n # if a time window is designated index data in that window\n if all([time_min, time_max]):\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y = Y[mask]\n\n # if a time window is not designated, default to the first 500 seconds\n else:\n time_min = 0\n time_max = 10\n mask = (Y.index >= time_min) & (Y.index <= time_max)\n Y= Y[mask]\n \n if electrode:\n if len(Y.shape) > 1:\n ax = Y.plot(title=title, lw=.6)\n else:\n ax = Y.plot(title=title, lw=.6, color='k')\n else:\n ax = Y.plot(legend=False, title=title, color='k', lw=.6)\n ax.set_facecolor('w')\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"electrode\")\n\n if filepath:\n plt.savefig(filename=filepath)\n else:\n plt.show()", "def plotPerTimeStamp(options):\n name = options['name'] + '_' + options['scan'] + '_perTime'\n if options['extra']:\n name += '_' + options['extra']\n f = openRootFileR(options['name']+'_perTime')\n histname = plotName(name, timestamp=False)\n filename = plotName(name, timestamp=True)\n filepath = plotPath(name, timestamp=True)\n print '<<< Save plot:', filepath\n hist = f.Get(histname)\n hist.SetErrorOption(options['error'])\n if options['big']:\n canvas = TCanvas('c', '', 8000, 1200)\n else:\n canvas = TCanvas('c', '', 1400, 500)\n canvas.SetLogy(options['logy'])\n gStyle.SetOptStat(options['optstat'])\n hist.Draw()\n gPad.Update()\n hist.GetXaxis().SetTimeDisplay(1)\n hist.GetXaxis().SetTimeFormat('#splitline{%d.%m.%y}{%H:%M:%S}%F1969-12-31' \\\n +' 22:00:00')\n hist.GetXaxis().SetLabelOffset(0.03)\n hist.GetXaxis().SetTitle('')\n if 'xmin' in options and 'xmax' in options:\n hist.GetXaxis().SetRangeUser(options['xmin'], options['xmax'])\n hist.GetYaxis().SetTitle(options['ytitle'])\n hist.GetYaxis().SetTitleOffset(1.2)\n for axis in [hist.GetXaxis(), hist.GetYaxis()]:\n axis.SetTitleFont(133)\n axis.SetTitleSize(16)\n axis.SetLabelFont(133)\n axis.SetLabelSize(12)\n axis.CenterTitle()\n if options['big']:\n axis.SetTickLength(0.01)\n if options['big']:\n hist.GetYaxis().SetTitleOffset(0.25)\n drawSignature(filename)\n gPad.Modified()\n gPad.Update()\n if options['retrn']:\n return [canvas, hist, f]\n else:\n canvas.Print(filepath)\n canvas.Close()\n 
closeRootFile(f, options['name']+'_perTime')", "def set_plot_labels():\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Temperature (F)\")\n plt.title('Temperature Time Series')", "def plotTimeDepth(d,v):\n\n dpth,t = getTimeDepth(d,v)\n plt.figure(num=0, figsize = (6, 4))\n plt.plot(dpth,t,linewidth=2);\n plt.title('Depth-Time');\n plt.grid()\n plt.gca().set_xlabel('Depth (m)',fontsize=9)\n plt.gca().set_ylabel('Two Way Time (s)',fontsize=9)\n\n plt.tight_layout()\n plt.show()", "def ProfilePlot(t,y,z,scale=86400, axis=0,color=[0.5,0.5,0.5]):\r\n from matplotlib import collections\r\n from matplotlib.ticker import Formatter\r\n\r\n class MyFormatter(Formatter):\r\n def __init__(self, dates, fmt='%b %d %Y'):\r\n self.fmt = fmt\r\n self.dates = dates\r\n\r\n def __call__(self, x, pos=0):\r\n 'Return the label for time x s'\r\n return datetime.strftime(datetime(1990,1,1)+timedelta(seconds=x),self.fmt)\r\n\r\n tsec = othertime.SecondsSince(t)\r\n formatter = MyFormatter(tsec)\r\n \r\n y = np.swapaxes(y,0,axis)\r\n \r\n lines=[]\r\n line2 =[]\r\n for ii, tt in enumerate(tsec):\r\n #xplot = set_scale(y[:,ii],tt)\r\n xplot = tt + y[:,ii]*scale\r\n lines.append(np.array((xplot,z)).T)\r\n line2.append(np.array([[tt,tt],[z[0],z[-1]]]).T)\r\n \r\n \r\n LC1 = collections.LineCollection(lines,colors=color,linewidths=1.5)\r\n LC2 = collections.LineCollection(line2,colors='k',linestyles='dashed') # Zero axis\r\n \r\n ax=plt.gca()\r\n ax.add_collection(LC1)\r\n ax.add_collection(LC2)\r\n ax.set_ylim((z.min(),z.max()))\r\n ax.xaxis.set_major_formatter(formatter)\r\n ax.set_xlim((tsec[0],tsec[-1]))\r\n plt.xticks(rotation=17) \r\n \r\n return ax", "def draw_trajectory(filepath: str, timestamps: bool = False):\n\n t, x, y, z = coordinates.parse_coordinates_file(filepath=filepath)\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n plt.xlabel('X', fontsize=10, rotation = 0)\n plt.ylabel('Y', fontsize=10, rotation = 0)\n ax.set_zlabel('Z', fontsize=10, rotation = 0)\n\n # Add timestamps to plot\n if timestamps:\n for i in range(len(t)):\n timea = str(datetime.timedelta(seconds=t[i]))\n ax.annotate(timea, (x[i], y[i], z[i]),)\n\n ax.scatter(x, y, z, label='Траектория движения НКА')\n # ax.legend()\n\n plt.show()", "def PlotTimeSeries(ticker, years_ago=5, verbose_mode=False):#, months_ago=0): \n \n # There are two Yahoo Modules we can use to pull our data (closeHist)\n # We'll pull from one and if we get an error will use the alternate\n try:\n closeHist = pd.DataFrame(yf.download(ticker,\n period='max', \n progress=False)['Close']).rename({'Close':'Price'}, axis=1)\n #closeHist = pd.DataFrame(yf.Ticker(ticker).history(period='max')['Close']).rename({'Close':'Price'}, axis=1)\n closeHist.index = closeHist.index.to_pydatetime()\n closeHist.index.name = 'Date'\n except json.JSONDecodeError:\n closeHist = pd.DataFrame(y_fin.get_data(ticker)['close']).rename({'close':'Price'}, axis=1)\n closeHist.index = closeHist.index.to_pydatetime()\n closeHist.index.name = 'Date'\n # Trim our data to years_ago\n closeHist = closeHist[closeHist.index > dt.datetime.now() + relativedelta(years=-years_ago)]\n closeHist.reset_index(inplace=True)\n #Calculate monthly avg. 
Price\n closeHist['Month'] = closeHist.Date.apply(lambda x: dt.date(x.year, x.month, 1))\n closeHist = closeHist.groupby('Month').last().rename({'Price':'Price(Monthly avg.)'}, axis=1)\n closeHist['x_index'] = pd.Series(range(len(closeHist.index)), closeHist.index)\n\n # Find Peaks and Troughs (Local Maximums and Minimums)\n MinSeries = closeHist['Price(Monthly avg.)'][(closeHist['Price(Monthly avg.)'].shift(1) > closeHist['Price(Monthly avg.)']) & \n (closeHist['Price(Monthly avg.)'].shift(-1) > closeHist['Price(Monthly avg.)'])]\n MaxSeries = closeHist['Price(Monthly avg.)'][(closeHist['Price(Monthly avg.)'].shift(1) < closeHist['Price(Monthly avg.)']) & \n (closeHist['Price(Monthly avg.)'].shift(-1) < closeHist['Price(Monthly avg.)'])]\n \n \n MinSeries = pd.concat([MinSeries, \n closeHist['Price(Monthly avg.)'][(closeHist.index <= MaxSeries.index[0])&\n (closeHist['Price(Monthly avg.)'] < MaxSeries.iloc[0])].head(1)]).sort_index()\n\n \n #BothSeries = pd.concat([MinSeries, MaxSeries]).sort_index()\n #MaxMaxSeries = BothSeries[(BothSeries.shift(1) < BothSeries) & (BothSeries.shift(-1) < BothSeries)]\n #MinMinSeries = BothSeries[(BothSeries.shift(1) > BothSeries) & (BothSeries.shift(-1) > BothSeries)]\n \n \n\n #3PTL Buy Line\n X = list()\n Y = list()\n x_1_date = MaxSeries.idxmax()\n x_1 = closeHist[closeHist.index==x_1_date].x_index.iloc[0]\n X.append(x_1)\n Y.append(MaxSeries.max())\n try:\n x_2_date = MaxSeries[MaxSeries.index > x_1_date].idxmax()\n x_2 = closeHist[closeHist.index==x_2_date].x_index.iloc[0]\n X.append(x_2)\n Y.append(MaxSeries[MaxSeries.index > x_1_date].max())\n except ValueError:\n pass\n #3PTL Sell Line\n X2 = list()\n Y2 = list()\n x2_1_date = MinSeries.idxmin()\n x2_1 = closeHist[closeHist.index==x2_1_date].x_index.iloc[0]\n X2.append(x2_1)\n Y2.append(MinSeries.min())\n try:\n x2_2_date = MinSeries[MinSeries.index > x2_1_date].idxmin()\n x2_2 = closeHist[closeHist.index==x2_2_date].x_index.iloc[0]\n X2.append(x2_2)\n Y2.append(MinSeries[MinSeries.index > x2_1_date].min())\n except ValueError:\n pass\n\n print('Current Price for', ticker, 'is', str(round(closeHist['Price(Monthly avg.)'].iloc[-1], 2)))\n\n sellLine_list = list()\n buyLine_list = list()\n\n #Calculate and plot Sell line:\n if len(X2) < 2:\n # IF WE CANNOT BUILD A SELL LINE USING MAX, START WITH FIRST TWO TROUGHS\n X2 = list(closeHist.loc[MinSeries.index]['x_index'].iloc[:2])\n Y2 = list(closeHist.loc[MinSeries.index]['Price(Monthly avg.)'].iloc[:2])\n ThreePtS = drawLine2P(x=X2,y=Y2,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n sellLine_list.append(ThreePtS[1])\n else: \n ThreePtS = drawLine2P(x=X2,y=Y2,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n sellLine_list.append(ThreePtS[1])\n\n #Calculate and plot Buy line:\n if len(X) < 2:\n pass\n else: \n ThreePtB = drawLine2P(x=X,y=Y,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n buyLine_list.append(ThreePtB[1])\n\n\n Buy_Breach = max(closeHist[closeHist.x_index.isin(X2)].index)\n if verbose_mode:\n n = 1 #TESTING\n while Buy_Breach:\n # FIRST BUY ITERATION\n latestHist = closeHist.loc[Buy_Breach:]\n subSell = latestHist.index[latestHist['Price(Monthly avg.)'] < pd.Series(ThreePtS[1], closeHist.index).loc[Buy_Breach:]]\n if len(subSell) > 0:\n Sell_Breach = subSell[0] \n preBreach = MaxSeries[MaxSeries.index < Sell_Breach].index\n postBreach = MaxSeries[MaxSeries.index > Sell_Breach].index\n if verbose_mode:\n print(\"{} Sell Breach at {}, 
this is Breach #{}\".format(ticker, Sell_Breach, n)) #TESTING\n n+=1\n if len(postBreach) > 0:\n pt_1 = closeHist.loc[closeHist.loc[preBreach]['Price(Monthly avg.)'].idxmax()]\n pt_2 = closeHist.loc[postBreach[0]]\n Y2 = [pt_1['Price(Monthly avg.)'], pt_2['Price(Monthly avg.)']]\n X2 = [pt_1['x_index'], pt_2['x_index']]\n ThreePtB = drawLine2P(x=X2,y=Y2,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n # plt.plot(closeHist.index, ThreePtB[1],\n # c='g', linestyle='dashed', \n # alpha=buyAlpha)\n buyLine_list.append(ThreePtB[1])\n else:\n Sell_Breach = None\n break \n else:\n Sell_Breach = None\n break\n while Sell_Breach:\n # FIRST SELL ITERATION\n latestHist = closeHist.loc[Sell_Breach:]\n superBuy = latestHist.index[latestHist['Price(Monthly avg.)'] > pd.Series(ThreePtB[1], closeHist.index).loc[Sell_Breach:]]\n if len(superBuy) > 0:\n Buy_Breach = superBuy[0]\n preBreach = MinSeries[MinSeries.index < Buy_Breach].index\n postBreach = MinSeries[MinSeries.index > Buy_Breach].index\n if verbose_mode:\n print(\"{} Buy Breach at {}, this is Breach #{}\".format(ticker, Buy_Breach, n)) #TESTING\n n+=1\n if len(postBreach) > 0:\n pt_1 = closeHist.loc[closeHist.loc[preBreach]['Price(Monthly avg.)'].idxmin()]\n pt_2 = closeHist.loc[postBreach[0]]\n Y2 = [pt_1['Price(Monthly avg.)'], pt_2['Price(Monthly avg.)']]\n X2 = [pt_1['x_index'], pt_2['x_index']]\n ThreePtS = drawLine2P(x=X2,y=Y2,xlims=[closeHist['x_index'].values.min(),\n closeHist['x_index'].values.max()+1])\n # plt.plot(closeHist.index, ThreePtS[1],\n # c='r', linestyle='dashed', \n # alpha=sellAlpha)\n sellLine_list.append(ThreePtS[1])\n\n break\n else:\n Buy_Breach = None\n break\n else:\n Buy_Breach = None\n break\n #sellLine_alpha = np.linspace(0.1, 1, len(sellLine_list))\n #buyLine_alpha = np.linspace(0.1, 1, len(buyLine_list))\n sellLine_alpha = np.flipud(np.linspace(1, 0.1, len(sellLine_list)+1)[:-1])\n buyLine_alpha = np.flipud(np.linspace(1, 0.1, len(buyLine_list)+1)[:-1])\n\n\n\n if len(sellLine_list) > 0:\n sellPrice = round(sellLine_list[-1][-1], 2)\n if sellPrice < 0:\n sellPrice = round(0.00, 2) \n print('Sell Price for', ticker, 'is', sellPrice)\n if len(buyLine_list) > 0:\n buyPrice = round(buyLine_list[-1][-1], 2)\n if buyPrice < 0:\n buyPrice = round(0.00, 2)\n print('Buy Price for', ticker, 'is', buyPrice)\n\n plt.figure(figsize=[20,9])\n with plt.style.context('fivethirtyeight'):\n plt.plot(closeHist['Price(Monthly avg.)'], zorder=0)\n \n if verbose_mode:\n for i in np.arange(len(sellLine_list)):\n plt.plot(closeHist.index, sellLine_list[i],\n c='r', linestyle='dashed', \n alpha=sellLine_alpha[i])\n\n for i in np.arange(len(buyLine_list)):\n plt.plot(closeHist.index, buyLine_list[i],\n c='g', linestyle='dashed', \n alpha=buyLine_alpha[i])\n\n if len(sellLine_list) > 0:\n plt.plot(closeHist.index, sellLine_list[-1],\n c='r',\n alpha=1)\n \n if len(buyLine_list) > 0:\n plt.plot(closeHist.index, buyLine_list[-1],\n c='g', \n alpha=1) \n\n plt.scatter(MinSeries.index, \n MinSeries,\n c='r', s=50, zorder=10)\n plt.scatter(MaxSeries.index, \n MaxSeries,\n c='g', s=50, zorder=10)\n # plt.scatter(MaxMaxSeries.index, \n # MaxMaxSeries,\n # c='y', s=100, zorder=5)\n # plt.scatter(MinMinSeries.index, \n # MinMinSeries,\n # c='y', s=100, zorder=5)\n plt.title(\"Buy and Sell Lines for \"+ ticker, {'fontsize':20})\n plt.autoscale()\n num = closeHist['Price(Monthly avg.)'].min()\n Y_lim_min = math.floor(num / 10 ** math.floor(math.log10(num))) * 10 ** math.floor(math.log10(num))\n num = 
closeHist['Price(Monthly avg.)'].max()\n Y_lim_max = math.ceil(num / 10 ** math.floor(math.log10(num))) * 10 ** math.floor(math.log10(num))\n plt.ylim(0, Y_lim_max)#,Y_lim_max)\n plt.show()", "def PlotSeries(self, header):\n \n data = self.DictData()\n \n def convert_date(string):\n # print string\n ddmmyy = string.split( \"/\" )\n # print ddmmyy\n ddmmyy.reverse()\n \n yymmdd = [int(i) for i in ddmmyy]\n # print yymmdd\n return pydate(2000 + yymmdd[0], yymmdd[1], yymmdd[2])\n \n time_series = [ (convert_date(row[ \"Date\" ]), float(row[ header ])) \\\n for row in data ]\n return time_series", "def plot(self):\n pass", "def temphum_plot(self, kwargs=None):\n\n def valuechange():\n \"\"\"This is the function which is called, when a value is changed in the spin boxes\"\"\"\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )\n\n def dry_air_action():\n if dry_air_btn.isChecked():\n device_dict = self.variables.devices_dict[\"temphum_controller\"]\n try:\n command = build_command(\n device_dict, (\"set_environement_control\", \"ON\")\n )\n answer = self.variables.vcw.write(device_dict, command)\n if not answer:\n self.log.error(\n \"The environement controller did not responsed accordingly. Answer: \"\n + str(answer).strip()\n )\n return 0\n except:\n self.log.error(\n \"An error occured while changing the environement control\"\n )\n return 0\n dry_air_btn.setText(\"Humidity ctl. on\")\n self.variables.default_values_dict[\"settings\"][\n \"humidity_control\"\n ] = True\n\n else:\n device_dict = self.variables.devices_dict[\"temphum_controller\"]\n try:\n command = build_command(\n device_dict, (\"set_environement_control\", \"OFF\")\n )\n answer = self.variables.vcw.write(device_dict, command)\n if not answer:\n self.log.error(\n \"The environement controller did not responsed accordingly. Answer: \"\n + str(answer).strip()\n )\n\n return 0\n except:\n self.log.error(\n \"An error occured while changing the environement control\"\n )\n return 0\n dry_air_btn.setText(\"Humidity ctl. 
off\")\n self.variables.default_values_dict[\"settings\"][\n \"humidity_control\"\n ] = False\n\n def light_action():\n \"\"\"This function is debricated\"\"\"\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False\n\n def check_light_state():\n if (\n self.variables.default_values_dict[\"settings\"][\"lights\"]\n and not light_btn.text() == \"Lights on\"\n ): # Checks if the lights are on and the button is off\n light_btn.setText(\"Lights on\")\n light_btn.setStyleSheet(\"background : rgb(0,255,0); border-radius: 5px\")\n elif (\n not self.variables.default_values_dict[\"settings\"][\"lights\"]\n and not light_btn.text() == \"Lights off\"\n ):\n light_btn.setText(\"Lights off\")\n light_btn.setStyleSheet(\"background : rgb(255,0,0); border-radius: 5px\")\n\n def config_plot(plot, plot2, pg):\n plot = plot.plotItem\n plot.setLabel(\"right\", \"humidity\", units=\"%\")\n plot.setLabel(\"bottom\", \"time\")\n plot.setLabel(\"left\", \"temperature\", units=\"Celsius\")\n plot.getAxis(\"left\").setPen(pg.mkPen(color=\"#c4380d\", width=3))\n plot.getAxis(\"right\").setPen(pg.mkPen(color=\"#025b94\", width=3))\n plot.showAxis(\"top\", show=True)\n plot.getAxis(\"top\").setTicks([])\n plot.getAxis(\"bottom\").setScale(1e-9)\n # plot.setRange(yRange=[15, 35])\n\n # For second plot\n plot.scene().addItem(\n plot2\n ) # inserts the second plot into the scene of the first\n plot2.setGeometry(plot.vb.sceneBoundingRect())\n plot.getAxis(\"right\").linkToView(\n plot2\n ) # links the second y axis to the second plot\n plot2.setXLink(plot) # sync the x axis of both plots\n # plot2.setRange(yRange=[0, 50])\n\n def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n \"\"\"This function cuts an array to a maximum time difference\n This function is supposed to be used only for temp and humidity shaped arrays\n \"\"\"\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass\n\n def update_temphum_plots(kwargs=None):\n # for rooms in self.rooms:\n if self.variables.default_values_dict[\"settings\"][\"new_data\"]:\n temphum_plot.clear() # clears the plot and prevents a memory leak\n hum_plot_obj.clear()\n p1 = temphum_plot.plotItem\n\n ax = p1.getAxis(\"bottom\") # This is the trick\n __cut_arrays(\n self.variables.meas_data,\n float(\n self.variables.default_values_dict[\"settings\"].get(\n \"temp_history\", 3600\n )\n ),\n [\"temperature\", \"humidity\"],\n )\n ax.setTicks(\n [\n get_thicks_for_timestamp_plot(\n self.variables.meas_data[\"temperature\"][0],\n 5,\n self.variables.default_values_dict[\"settings\"][\n \"time_format\"\n ],\n )\n ]\n )\n\n try:\n if len(self.variables.meas_data[\"temperature\"][0]) == len(\n self.variables.meas_data[\"humidity\"][1]\n ): # sometimes it happens that the values are not yet ready\n p1.plot(\n self.variables.meas_data[\"temperature\"][0],\n self.variables.meas_data[\"temperature\"][1],\n pen={\"color\": \"r\", \"width\": 2},\n 
clear=True,\n )\n plot_item = setpg.PlotCurveItem(\n self.variables.meas_data[\"humidity\"][0],\n self.variables.meas_data[\"humidity\"][1],\n pen={\"color\": \"b\", \"width\": 2},\n clear=True,\n )\n hum_plot_obj.addItem(plot_item)\n del plot_item # the plot class needs a plot item which can be rendered, to avoid a mem leak delete the created plot item or 20k ram will be used\n # hum_plot_obj.addItem(setpg.plot(self.variables.meas_data[\"humidity\"][0],self.variables.meas_data[\"humidity\"][1],pen={'color': \"b\", 'width': 2}, clear=True))\n hum_plot_obj.setGeometry(\n p1.vb.sceneBoundingRect()\n ) # resize the second plot!\n except:\n pass\n\n # Create sublayout\n temphum_layout = QGridLayout()\n\n # Frame over the objects\n frame = QLabel()\n frame.setFrameStyle(QFrame.Box | QFrame.Raised)\n frame.setLineWidth(0)\n frame.setMidLineWidth(2)\n\n self.layout.addWidget(\n frame, self.temp_ypos, self.temp_xpos, self.temp_ysize, self.temp_xsize\n )\n\n x = np.zeros(1)\n y = np.zeros(1)\n\n setpg = pq\n # date_axis = CAxisTime(orientation='bottom') # Correctly generates the time axis\n hum_plot_obj = setpg.ViewBox() # generate new plot item\n temphum_plot = pq.PlotWidget()\n config_plot(temphum_plot, hum_plot_obj, setpg) # config the plot items\n\n self.variables.add_update_function(update_temphum_plots)\n\n # Additional Variables will be generated for temp and hum\n # self.variables.default_values_dict[\"settings\"].update({\"lights\": False, \"humidity_control\": True, \"current_tempmin\": 20, \"current_tempmax\": 25, \"current_hummin\": 20,\"current_hummax\": 25})\n\n # Spin Boxes for temp and humidity\n\n tempmin = QSpinBox()\n tempmax = QSpinBox()\n hummin = QSpinBox()\n hummax = QSpinBox()\n\n # Spinbox label\n textbox_temp = QLabel()\n textbox_temp.setText(\"Min temp. Max temp.\")\n textbox_temp.setFont(self.font)\n textbox_hum = QLabel()\n textbox_hum.setText(\"Min hum. Max hum.\")\n textbox_hum.setFont(self.font)\n\n # Config\n\n tempmin.setRange(15, 35)\n tempmin.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_tempmin\", 0)\n )\n )\n tempmax.setRange(15, 35)\n tempmax.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_tempmax\", 0)\n )\n )\n tempmin.valueChanged.connect(valuechange)\n tempmax.valueChanged.connect(valuechange)\n\n hummin.setRange(0, 70)\n hummin.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_hummin\", 0)\n )\n )\n hummax.setRange(0, 70)\n hummax.setValue(\n float(\n self.variables.default_values_dict[\"settings\"].get(\"current_hummax\", 0)\n )\n )\n hummin.valueChanged.connect(valuechange)\n hummax.valueChanged.connect(valuechange)\n\n # Push buttons on the right for humidity control and light control\n\n dry_air_btn = QPushButton(\"Humidity ctl. 
off\")\n self.variables.default_values_dict[\"settings\"][\"humidity_control\"] = False\n dry_air_btn.setCheckable(True)\n dry_air_btn.toggle()\n dry_air_btn.clicked.connect(dry_air_action)\n dry_air_btn.setChecked(False)\n\n light_btn = QLabel()\n light_btn.setText(\"State not defined\")\n light_btn.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)\n light_btn.setStyleSheet(\"background : rgb(255,0,0); border-radius: 5px\")\n\n # light_btn.setCheckable(True)\n # light_btn.clicked.connect(light_action)\n\n # Humidity\n # temphum_plot.plot(x,y, pen=\"b\")\n\n # Widgets add\n temphum_layout.addWidget(textbox_temp, 0, 0, 1, 2)\n temphum_layout.addWidget(tempmin, 1, 0)\n temphum_layout.addWidget(tempmax, 1, 1)\n\n temphum_layout.addWidget(textbox_hum, 2, 0, 1, 2)\n temphum_layout.addWidget(hummin, 3, 0)\n temphum_layout.addWidget(hummax, 3, 1)\n\n temphum_layout.addWidget(dry_air_btn, 4, 0, 1, 2)\n temphum_layout.addWidget(light_btn, 5, 0, 3, 2)\n\n temphum_layout.addWidget(temphum_plot, 0, 3, 10, 2)\n\n temphum_layout.setContentsMargins(8, 8, 0, 8) # Makes a margin to the layout\n\n # Add the layout to the main layout\n self.layout.addLayout(\n temphum_layout,\n self.temp_ypos,\n self.temp_xpos,\n self.temp_ysize,\n self.temp_xsize,\n )\n\n def update():\n pass\n\n self.variables.add_update_function(update)\n self.variables.add_update_function(check_light_state)", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def test_plot_timeseries_univariate(tmpdir, random):\n x = np.linspace(0, 10, 20)\n y = np.sin(x)\n segments = get_test_segments(data=np.expand_dims(y, 0))\n\n output_path = Path(tmpdir) / 'temp_visualization_test_univariate.png'\n\n plot_timeseries(x=x,\n y=y,\n segments=segments,\n show_plot=False,\n output_filename=output_path)\n\n assert output_path.exists()", "def plotData(BX,BY,xi,yi,expArr,t,savepath_dir):\r\n \r\n #Find the current channel data\r\n Jz=newCurrent(BX,BY,xi,yi,expArr,t)\r\n\r\n #Find the dipole vector components\r\n BxTime=np.real(BX*expArr[t])\r\n ByTime=np.real(BY*expArr[t])\r\n\r\n #Plot the current density contour and dipole vector grid\r\n #Create the figure\r\n p1=plt.figure(figsize=(9,8))\r\n \r\n #Plot the data\r\n p1=plt.contourf(xi,yi,Jz,levels=100,vmin=-0.1,vmax=0.1)\r\n qv1=plt.quiver(xi,yi,BxTime,ByTime,width=0.004,scale=3)\r\n \r\n #Add axes labels and title\r\n p1=plt.xlabel('X [cm]',fontsize=20)\r\n p1=plt.ylabel('Y [cm]',fontsize=20)\r\n # p1=plt.title('Alfven Wave Dipole; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n p1=plt.title('E Field; Frequency='+str(freq)+r'KHz; $\\nu_{ei}$='+str(col)+'KHz',fontsize=19,y=1.02)\r\n \r\n #Set axes parameters\r\n p1=plt.xticks(np.arange(-50,51,5))\r\n p1=plt.yticks(np.arange(-50,51,5))\r\n p1=plt.xlim(-xAxisLim,xAxisLim)\r\n p1=plt.ylim(-yAxisLim,yAxisLim)\r\n \r\n #Add colorbar\r\n cbar=plt.colorbar()\r\n cbar.set_label('Normalized Current Density',rotation=270,labelpad=15)\r\n cbar=plt.clim(-1,1)\r\n \r\n #Add vector label\r\n 
plt.quiverkey(qv1,-0.1,-0.1,0.2,label=r'$(B_x,B_y)$')\r\n \r\n #Miscellaneous\r\n p1=plt.tick_params(axis='both', which='major', labelsize=18)\r\n p1=plt.grid(True)\r\n p1=plt.gcf().subplots_adjust(left=0.15)\r\n\r\n #Save the plot\r\n savepath_frame=savepath_dir+'frame'+str(t+1)+'.png'\r\n p1=plt.savefig(savepath_frame,dpi=100,bbox_to_anchor='tight')\r\n p1=plt.close()\r\n\r\n #Let me know which frame we just saved\r\n print('Saved frame '+str(t+1)+' of '+str(len(expArr)))\r\n \r\n return", "def epics_data_plot(data):\n if isinstance(data, (xr.DataArray, xr.Dataset)):\n data = data.to_dataframe()", "def plot (self):\n \n plt.stem(self.nTs, self._signal)", "def render_data_points(times, data_points, config):\n\n\twith warnings.catch_warnings():\n\t\twarnings.simplefilter(\"ignore\")\n\t\tpyplot.pause(0.01)\n\n\tfor id_, graph in enumerate(data_points.values(), start=1):\n\t\tif config[\"subplots\"][\"show\"]:\n\t\t\tpyplot.subplot(\n\t\t\t\tconfig[\"subplots\"][\"vertical\"],\n\t\t\t\tconfig[\"subplots\"][\"horizontal\"],\n\t\t\t\tid_\n\t\t\t)\n\n\t\ty_values = normalize(graph[\"values\"]) if config[\"normalize\"] \\\n\t\t\telse graph[\"values\"]\n\t\tgraph[\"graph\"].set_data(times, y_values)\n\n\taxes = pyplot.gca()\n\taxes.relim()\n\taxes.autoscale_view()\n\tpyplot.draw()", "def temperature(self):\n self.convert_window(\"Temperature\", \"Celsius\", [\"Celsius\", \"Fahrenheit\", \"Kelvin\", \"Rankine\", \"Reaumur\", \"Newton\", \"Romer\", \"Delisle\"])" ]
[ "0.77332544", "0.7619313", "0.7040905", "0.6752579", "0.6656995", "0.6611152", "0.6522687", "0.6483575", "0.63331616", "0.6321086", "0.6280439", "0.6280298", "0.6186041", "0.61790603", "0.61554503", "0.61331767", "0.6095322", "0.6091136", "0.6080695", "0.6079412", "0.6053336", "0.604989", "0.6045564", "0.60411644", "0.602891", "0.6026625", "0.6018399", "0.5997073", "0.5962086", "0.59548324", "0.5940484", "0.5910146", "0.5901463", "0.58949906", "0.5876008", "0.586596", "0.5828777", "0.5817324", "0.58103675", "0.5809377", "0.57985276", "0.5791019", "0.57863915", "0.57857645", "0.5785657", "0.578341", "0.57804", "0.57782006", "0.57759434", "0.57730496", "0.57585573", "0.5757645", "0.57469046", "0.5743261", "0.5729074", "0.5727336", "0.57232356", "0.5721834", "0.57082355", "0.5707652", "0.57075334", "0.57047546", "0.5700818", "0.5686863", "0.5674301", "0.56677765", "0.5664995", "0.5657585", "0.56418496", "0.5634853", "0.56271213", "0.561758", "0.5610347", "0.5609119", "0.5608366", "0.5605292", "0.5599986", "0.55956495", "0.55856514", "0.55836844", "0.55759704", "0.5574417", "0.55711764", "0.55701", "0.5565189", "0.55614185", "0.55487347", "0.554857", "0.55466926", "0.55455416", "0.5545459", "0.5541757", "0.5535475", "0.55349874", "0.5530265", "0.5528925", "0.55287635", "0.55283034", "0.55251646", "0.55246466", "0.5523901" ]
0.0
-1
The provider-assigned unique ID for this managed resource.
def id(self) -> str: return pulumi.get(self, "id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def provider_id(self):\n return self.get('_id')", "def provider_id(self):\n raise NotImplementedError", "def id(self):\n return self.raw_resource.uuid", "def healthcare_provider_id(self):\n return self._healthcare_provider_id", "def unique_identifier(self) -> str:\n return pulumi.get(self, \"unique_identifier\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def unique_id(self):\r\n return f\"{DOMAIN}_{self.charge_point_id}_{self.connector_id}\"", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self) -> str:\n return self._unique_id", "def unique_id(self):\n return self._uuid", "def unique_id(self):\n return self._uuid", "def unique_id(self) -> str:\n return self._uid", "def unique_id(self):\n return f\"{self.device.id}-{self.key}\"", "def unique_id(self):\n return self.properties.get(\"UniqueId\", None)", "def internal_id(self) -> str:\n return pulumi.get(self, \"internal_id\")", "def unique_id(self) -> str:\n return f\"{self._host}_{self._name}_{self._unique_id}\"", "def custom_id(self) -> str:\n return self._underlying.custom_id", "def unique_id(self):\n return self._id", "def unique_id(self):\n return self._id", "def unique_id(self):\n return (\n \"a80f3d5b-df3d-4e38-bbb7-1025276830cd\"\n )", "def get_objectID(self):\n return self.resource.uuid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n return self._uid", "def unique_id(self):\n id = \"{}{}{}\".format(\n DOMAIN, self._account, self.sensorName.lower().replace(\" \", \"\")\n )\n return id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self):\n return self._unique_id", "def unique_id(self) -> str:\n return self.get_unique_id(wallet=self.wallet_id, nonce=self.nonce)", "def unique_id(self):\n return self.device_id", "def get_id(self):\n 
\"\"\"Requires use of Python 3\"\"\"\n return str(self.id)", "def resourceid(self):", "def unique_id(self) -> str:\n return '{0}_{1}'.format(self._mac.replace(':', ''), self.entity_id)", "def unique_id(self):\n return self._device_id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def getid(self):\n if self.Id is None:\n return self.internalid\n else:\n return self.Id", "def get_id(self):\n return str(self._id)", "def get_id(self):\n return str(self._id)", "def identity(self) -> str:\n return self.requester.uuid", "def get_id(self) -> str:\n return self._register_id", "def get_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def unique_id(self):\n return f\"{self.config_entry.entry_id}_{self.hub_name}_{self.sensor_name}\"", "def getID(self):\n return str(self._storage_id)", "def id(self):\n return self.raw_resource[\"id\"]", "def get_id(self): # real signature unknown; restored from __doc__\n return \"\"", "def id(self) -> str:\r\n return self._id", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def identifier(self):\n return self.__id", "def get_id(self):\n return self.uid", "def resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"resource_id\")", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id", "def id(self) -> str:\n return self._id" ]
[ "0.8193402", "0.7851373", "0.77124894", "0.7604287", "0.7477648", "0.7476093", "0.7476093", "0.7476093", "0.7425807", "0.7380237", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.7371964", "0.735787", "0.735787", "0.73477197", "0.7291611", "0.72812176", "0.72517675", "0.7251651", "0.7218092", "0.7211636", "0.7211636", "0.7201574", "0.7181422", "0.7166036", "0.7166036", "0.7166036", "0.7138984", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.71376497", "0.7133902", "0.7126198", "0.7119549", "0.71155995", "0.70892346", "0.7068222", "0.7059289", "0.7059289", "0.7059289", "0.7059289", "0.7059289", "0.7059289", "0.70582974", "0.70582974", "0.7053728", "0.70350826", "0.70212394", "0.7020135", "0.7014936", "0.7014571", "0.70135075", "0.7007213", "0.69911283", "0.69911283", "0.69911283", "0.69911283", "0.6989271", "0.69725364", "0.69559777", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233", "0.6937233" ]
0.0
-1
Use this data source to retrieve basic information about all standalone VPCs available for an account. Uses the included apikey in provider configuration to determine which account to read from. Example Usage Can be used in other resources/data sources when the VPC identifier is unknown, while other attributes are known. E.g. find correct VPC using the `name` you gave your VPC. Then iterate over VPCs to find the matching one and extract the VPC identifier. ```python import pulumi import pulumi_cloudamqp as cloudamqp my_vpc_name = "" vpc_list = cloudamqp.get_account_vpcs() pulumi.export("vpcId", [vpc for vpc in vpc_list.vpcs if vpc.name == my_vpc_name][0].id) ``` Attributes reference All attributes reference are computed `id` The identifier for this data source. Set to `na` since there is no unique identifier. `vpcs` An array of VPCs. Each `vpcs` block consists of the fields documented below. The `vpcs` block consist of `id` The VPC identifier. `name` The VPC instance name. `region` The region the VPC is hosted in. `subnet` The VPC subnet. `tags` Optional tags set for the VPC. `vpc_name` VPC name given when hosted at the cloud provider. Dependency This data source depends on apikey set in the provider configuration.
def get_account_vpcs(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountVpcsResult: __args__ = dict() opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('cloudamqp:index/getAccountVpcs:getAccountVpcs', __args__, opts=opts, typ=GetAccountVpcsResult).value return AwaitableGetAccountVpcsResult( id=__ret__.id, vpcs=__ret__.vpcs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vpc ( vpc_conn, vpc_name ) :\n vpcs = vpc_conn.get_all_vpcs( filters = { \"tag:Name\": [ vpc_name ] } )\n for v in vpcs :\n return v", "def get_vpc_data(self, vpc_id, region):\n if not vpc_id:\n return None\n tmp_vpc_client = ibm.client(region=region)\n try:\n vpc_data = tmp_vpc_client.get_vpc(vpc_id).result\n return vpc_data\n except ibm.ibm_cloud_sdk_core.ApiException as e:\n if e.code == 404:\n logger.debug(\"VPC doesn't exist.\")\n return None\n else:\n raise", "def get_all_vpc_instances ( ec2_conn, vpc ) :\n return ec2_conn.get_only_instances( filters = { \"vpc-id\" : vpc.id } )", "def get_aws_vpc_if_exists(vpc_id_name, aws_region=None):\n response = boto3.client('ec2', region_name=aws_region).describe_vpcs(\n Filters=[\n {\n 'Name': 'vpc-id',\n 'Values': [\n vpc_id_name,\n ]\n },\n ],\n )['Vpcs']\n\n if response:\n vpc_cidr = ipaddress.ip_network(response[0]['CidrBlock'])\n vpc_id = response[0]['VpcId']\n vpc_name = get_aws_resource_name(response[0])\n return PyVPCBlock(network=vpc_cidr, resource_id=vpc_id, name=vpc_name, resource_type='vpc')\n\n # In case no VPC found using vpc-id filter, try using input as name filter\n response = boto3.client('ec2', region_name=aws_region).describe_vpcs(\n Filters=[\n {\n 'Name': 'tag:Name',\n 'Values': [\n vpc_id_name,\n ]\n },\n ],\n )['Vpcs']\n\n # There is a single vpc with 'vpc_id_name'\n if len(response) == 1:\n vpc_cidr = ipaddress.ip_network(response[0]['CidrBlock'])\n vpc_id = response[0]['VpcId']\n vpc_name = get_aws_resource_name(response[0])\n return PyVPCBlock(network=vpc_cidr, resource_id=vpc_id, name=vpc_name, resource_type='vpc')\n # Is case there are multiple VPCs with the same name, raise exception\n elif len(response) > 1:\n found = []\n for x in response:\n found.append(x['VpcId'])\n raise ValueError(\"more then one vpc found with name {} - {}\".format(vpc_id_name, str(found)))\n\n # Nothing found\n return None", "def create(ctx, **kwargs):\n\n cloud_driver = get_cloud_driver(ctx)\n\n vpc = {\n 'description': None,\n 'name': ctx.node_id,\n }\n\n ctx.logger.debug('reading vpc configuration.')\n vpc.update(ctx.properties['network'])\n\n vpc_name = vpc['name']\n cidr = vpc['cidr']\n zone = vpc['zone']\n location = get_location(cloud_driver, zone)\n vpcoffer = vpc['service_offering']\n vpc_offering = get_vpc_offering(cloud_driver, vpcoffer)\n\n ctx.logger.info('Current node {0}{1}'.format(ctx.node_id, ctx.properties))\n\n ctx['vpc_id'] = ctx.properties\n\n if not _vpc_exists(cloud_driver, vpc_name):\n ctx.logger.info('creating vpc: {0}'.format(vpc_name))\n\n vpc = cloud_driver.ex_create_vpc(\n cidr=cidr,\n name=vpc_name,\n display_text=vpc_name,\n vpc_offering=vpc_offering,\n zone_id=location.id)\n else:\n ctx.logger.info('using existing vpc network {0}'.\n format(vpc_name))\n vpc = get_vpc(cloud_driver, vpc_name)\n\n ctx['vpc_id'] = vpc.id\n ctx['vpc_name'] = vpc.name", "def create_vpc(self):\n vpc_data = self.vpc_client.create_vpc(\n address_prefix_management=\"auto\",\n classic_access=False,\n name=f\"sky-vpc-{self.cluster_name}-{str(uuid.uuid4())[:5]}\",\n resource_group={\"id\": self.resource_group_id},\n ).get_result()\n subnet_data = self.create_subnet(vpc_data[\"id\"], self.zone)\n self.create_public_gateway(vpc_data[\"id\"], self.zone, subnet_data)\n sg_id = self.create_sg_rules(vpc_data)\n\n # tag vpc with the cluster's name\n resource_model = {\"resource_id\": vpc_data[\"crn\"]}\n self.tagging_client.attach_tag(\n resources=[resource_model], tag_names=[self.cluster_name], tag_type=\"user\"\n ).get_result()\n\n 
return {\n \"vpc_id\": vpc_data[\"id\"],\n \"subnet_id\": subnet_data[\"id\"],\n \"security_group_id\": sg_id,\n }", "def __str__(self):\n return(self.vpc_id)", "def create_or_fetch_vpc(self, region, zone):\n\n # refresh client region scope if region changed.\n if self.region and self.region != region:\n self.vpc_client = ibm.client(region=region)\n self.region = region\n self.zone = zone\n reused_vpc_data = None\n # pylint: disable=line-too-long\n vpcs_filtered_by_tags_and_region = self.search_client.search(\n query=f\"type:vpc AND tags:{self.cluster_name} AND region:{self.region}\",\n fields=[\"tags\", \"region\", \"type\"],\n limit=1000,\n ).get_result()[\"items\"]\n for vpc in vpcs_filtered_by_tags_and_region:\n vpc_id = vpc[\"crn\"].rsplit(\":\", 1)[-1]\n vpc_data = self.get_vpc_data(vpc_id, self.region)\n if vpc_data[\"status\"] == \"available\":\n reused_vpc_data = vpc_data\n break\n # found vpc tagged with cluster name in the required region\n if reused_vpc_data:\n # using self.region since tagged vpc is in the same region\n subnets = self.get_vpc_subnets(reused_vpc_data, self.region)\n subnet_in_zone = next(\n (subnet for subnet in subnets if subnet[\"zone\"][\"name\"] == self.zone),\n None,\n )\n # found a subnet in the required zone\n if subnet_in_zone:\n subnet_id = subnet_in_zone[\"id\"]\n public_gateway = subnet_in_zone.get(\"public_gateway\")\n if not public_gateway:\n public_gateway = self.create_public_gateway(\n reused_vpc_data[\"id\"], self.zone, subnet_in_zone\n )\n # tagged vpc found doesn't have a subnet in the required zone\n else:\n subnet_data = self.create_subnet(reused_vpc_data[\"id\"], self.zone)\n subnet_id = subnet_data[\"id\"]\n public_gateway = self.create_public_gateway(\n reused_vpc_data[\"id\"], self.zone, subnet_data\n )\n\n # add missing security group rules if needed\n security_group = reused_vpc_data.get(\"default_security_group\")\n if security_group:\n sg_id = security_group[\"id\"]\n self.add_missing_sg_rules(sg_id)\n\n # managed to reuse found VPC\n logger.info(\n f\"Reusing VPC {reused_vpc_data['id']} named: {reused_vpc_data['name']}\"\n )\n return {\n \"vpc_id\": reused_vpc_data[\"id\"],\n \"subnet_id\": subnet_id,\n \"security_group_id\": sg_id,\n }\n\n # delete a tagged vpc that doesn't meet requirements\n if reused_vpc_data:\n self.delete_vpc(reused_vpc_data[\"id\"], self.region)\n # create a new vpc\n vpc_tags = self.create_vpc()\n return vpc_tags", "def getvpc(ec2, glob):\n\treturn [\n\t\ti for i in ec2.vpcs.filter(\n\t\t\tFilters=[ {'Name': 'tag:Name', 'Values': [glob]} ]\n\t\t)\n\t]", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> str:\n return pulumi.get(self, \"vpc_id\")", "def getSDDCConnectedVPC(**kwargs):\n proxy_url = kwargs['proxy']\n session_token = kwargs[\"sessiontoken\"]\n # NSX \n json_response = get_conencted_vpc_json(proxy_url, session_token)\n if json_response == None:\n sys.exit(1)\n sddc_connected_vpc = json_response['results'][0]\n sddc_connected_vpc_services = get_connected_vpc_services_json(proxy_url, session_token, sddc_connected_vpc['linked_vpc_id'])\n# The API changed for connected VPCs from M16 to M18 when the connected VPC prefix lists were added to M18.\n# This if-else block should allow this function to work with 
both M16 and earlier as well as M18 and newer SDDCs.\n if 'active_eni' in sddc_connected_vpc:\n eni = sddc_connected_vpc['active_eni']\n elif 'traffic_group_eni_mappings' in sddc_connected_vpc:\n eni = sddc_connected_vpc['traffic_group_eni_mappings'][0]['eni']\n else:\n eni = \"Unknown\"\n table = PrettyTable(['Customer-Owned Account', 'Connected VPC ID', 'Subnet', 'Availability Zone', 'ENI', 'Service Name', 'Service Access'])\n table.add_row([sddc_connected_vpc['linked_account'], sddc_connected_vpc['linked_vpc_id'], sddc_connected_vpc['linked_vpc_subnets'][0]['cidr'], sddc_connected_vpc['linked_vpc_subnets'][0]['availability_zone'], eni, sddc_connected_vpc_services['results'][0]['name'],sddc_connected_vpc_services['results'][0]['enabled']])\n print(\"Connected Services\")\n print(table)", "def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_id\")", "def process_vpc ( aws, region_name, aws_account_type, params ) :\n base_name = params[ 'base-name' ].upper( )\n\n vpc_conn = aws.vpc_conn( )\n vpc = get_vpc( vpc_conn, get_vpc_name( base_name ) )\n\n if not vpc :\n if params.get( 'create', 'NO' ) == 'YES' :\n vpc = create_vpc( vpc_conn,\n aws.ec2_conn( ),\n aws.clw_conn( ),\n aws.r53_conn( ),\n aws_account_type,\n region_name,\n aws.base_topicarn( ),\n base_name,\n params )\n if not vpc : \n print \"ERROR: cannot find VPC with name \" + get_vpc_name( base_name )\n sys.exit( 1 )\n\n if params.get( 'applications' ) :\n print \"Processing applications\"\n for application in params[ 'applications' ] :\n process_application( vpc_conn,\n aws.ec2_conn( ),\n aws.elb_conn( ),\n aws.clw_conn( ),\n aws.r53_conn( ),\n aws.s3_infrastructure_conn( ),\n aws.iam_conn( ),\n vpc,\n base_name,\n aws.base_topicarn( ),\n region_name,\n aws_account_type,\n application )\n\n secgrp_map = {}\n if params.get( 'security-groups' ) :\n secgrp_map = process_security_groups( aws.ec2_conn( ), vpc, base_name, params[ 'security-groups' ] )\n\n if params.get( 'security-group-internal-rules' ) :\n process_security_group_internal_rules( aws.ec2_conn( ), base_name, params[ 'security-group-internal-rules' ], secgrp_map )\n \n for load_balancer_params in params.get( 'load-balancers', [] ) :\n process_load_balancer ( vpc_conn,\n aws.ec2_conn( ),\n aws.elb_conn( ),\n aws.clw_conn( ),\n aws.r53_conn( ),\n vpc,\n base_name,\n aws.base_topicarn( ),\n None,\n load_balancer_params,\n False )\n\n for sns_params in params.get( 'sns-topics', [] ) :\n process_sns_topic( aws.sns_conn( ), base_name, sns_params )\n\n for action_params in params.get( 'actions', [] ) :\n print action_params[ 'type' ] \n if action_params[ 'type' ] == 'update-os' :\n update_instances_os( aws.ec2_conn( ), vpc, base_name, action_params.get( 'restart', 'NO' ) == 'YES' )\n elif action_params[ 'type' ] == 'init-mongo' :\n install_mongo( vpc_conn, aws.ec2_conn( ), aws.clw_conn( ),vpc, base_name, aws_account_type, 
aws.base_topicarn( ), action_params )\n elif action_params[ 'type' ] == 'init-rabbit' :\n install_rabbit(vpc_conn, aws.ec2_conn( ), aws.elb_conn( ), aws.clw_conn( ), aws.r53_conn( ),vpc, base_name, aws_account_type, aws.base_topicarn( ), action_params )\n elif action_params[ 'type' ] == 'init-logstash' :\n install_logstash(vpc_conn, aws.ec2_conn( ), aws.elb_conn( ), aws.clw_conn( ), aws.r53_conn( ),vpc, base_name, aws_account_type, aws.base_topicarn( ), action_params )\n elif action_params[ 'type' ] == 'init-logstash-ui' :\n install_logstash_ui(vpc_conn, aws.ec2_conn( ), aws.elb_conn( ), aws.clw_conn( ), aws.r53_conn( ),vpc, base_name, aws_account_type, aws.base_topicarn( ), action_params )\n elif action_params[ 'type' ] == 'configure-rabbitmq' :\n configure_rabbit(aws.ec2_conn( ), base_name,action_params.get(\"rabbitmq\") )\n elif action_params[ 'type' ] == 'load-mongo-configuration' :\n load_mongo_configuration(aws.ec2_conn( ), base_name,action_params.get(\"mongodb\") )\n elif action_params[ 'type' ] == 'send_rabbit_message' :\n send_rabbit_message(action_params.get(\"rabbitmq\") )\n elif action_params[ 'type' ] == 'shutdown' :\n print \"Shutting down all instances in vpc \" + get_vpc_name( base_name )\n shutdown_vpc( aws.ec2_conn( ), vpc, action_params.get( \"exceptions\" ) )\n print \"Done\"\n elif action_params[ 'type' ] == 'startup' :\n print \"Starting up all instances in vpc \" + get_vpc_name( base_name )\n startup_vpc( aws.ec2_conn( ), aws.elb_conn( ), vpc, action_params.get( \"exceptions\" ) )\n print \"Done\"\n elif action_params[ 'type' ] == 'delete-vpc' :\n print \"Deleting VPC \" + vpc.tags[ 'Name' ] + \"!\"\n delete_vpc( aws.vpc_conn( ), aws.ec2_conn( ), aws.elb_conn( ), aws.sns_conn( ), aws.clw_conn( ), vpc, base_name, action_params )\n elif action_params[ 'type' ] == 'create-s3-dropbox' :\n process_create_s3_dropbox ( aws_account_type, aws.s3_data_conn( ), aws.iam_conn( ), base_name, action_params )\n elif action_params[ 'type' ] == 'create-storage-repository' :\n process_create_storage_repository ( aws_account_type, aws.s3_data_conn( ), aws.iam_conn( ), base_name, action_params )\n elif action_params[ 'type' ] == 'create-dropbox-client-account' :\n process_create_dropbox_client_account( aws.iam_conn( ), base_name, action_params )", "def vpc(template, name, cidr='10.0.0.0/16'):\n v = VPC(name, template=template)\n v.CidrBlock = cidr\n v.EnableDnsSupport = True\n v.EnableDnsHostnames = True\n v.Tags = Tags(Name=aws_name(v.title))\n return v", "def get_vpc_name ( base_name ) :\n return base_name + '-VPC'", "def get_vpcs(self):\n class_query = ClassQuery('fabricProtPathEpCont')\n vpc_containers = self.moDir.query(class_query)\n vpc_list = []\n for container in vpc_containers:\n for vdc in self.query_child_objects(str(container.dn)):\n vpc_list.append(vdc)\n return vpc_list", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc(self, region, stack):\n region_name = region['region_name']\n stack_params = self._vpc_params(region)\n template = self._template('vpc')\n stack_hash = sha256(template, stack_params)\n if self._complete(stack, stack_hash):\n 
logger.debug('VPC stack complete in %s.', region_name)\n return None\n stack_name = 'flotilla-{0}-vpc'.format(self._environment)\n\n template = self._setup_azs(stack_params, template)\n\n new_stack = self._stack(region_name, stack_name, template, stack_params)\n stack_outputs = {o.key: o.value for o in new_stack.outputs if o.value}\n return {'stack_arn': new_stack.stack_id,\n 'region': region_name,\n 'outputs': stack_outputs,\n 'stack_hash': stack_hash}", "def func_vpc(self):\n return self._func_vpc", "def vpc_id(self) -> Optional[str]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc(self, region, stack):\n region_name = region['region_name']\n stack_params = self._vpc_params(region)\n template = self._template('vpc')\n stack_hash = sha256(template, stack_params)\n if self._complete(stack, stack_hash):\n logger.debug('VPC stack complete in %s.', region_name)\n return None\n stack_name = 'flotilla-{0}-vpc'.format(self._environment)\n\n new_stack = self._stack(region_name, stack_name, template, stack_params)\n stack_outputs = {o.key: o.value for o in new_stack.outputs}\n return {'stack_arn': new_stack.stack_id,\n 'region': region_name,\n 'outputs': stack_outputs,\n 'stack_hash': stack_hash}", "def vpc_id(self):\n return self._vpc_id", "def get_virtual_servers(configuration: Configuration,\r\n resource_group_id: str = None,\r\n name: str = None,\r\n vpc_id: str = None,\r\n vpc_name: str = None,\r\n vpc_crn: str = None) -> Dict[str, Any]:\r\n service = create_ibmcloud_api_client(configuration)\r\n try:\r\n instances = \\\r\n service.list_instances(resource_group_id=resource_group_id, name=name, vpc_id=vpc_id, vpc_crn=vpc_crn,\r\n vpc_name=vpc_name).get_result()['instances']\r\n except ApiException as e:\r\n logger.error(\"List instances failed with status code \" +\r\n str(e.code) + \": \" + e.message)\r\n return instances", "def vpc_uuid(self) -> str:\n return pulumi.get(self, \"vpc_uuid\")", "def test_transform_and_load_vpcs(neo4j_session):\n vpc_res = tests.data.gcp.compute.VPC_RESPONSE\n vpc_list = cartography.intel.gcp.compute.transform_gcp_vpcs(vpc_res)\n cartography.intel.gcp.compute.load_gcp_vpcs(neo4j_session, vpc_list, TEST_UPDATE_TAG)\n\n query = \"\"\"\n MATCH(vpc:GCPVpc{id:$VpcId})\n RETURN vpc.id, vpc.partial_uri, vpc.auto_create_subnetworks\n \"\"\"\n expected_vpc_id = 'projects/project-abc/global/networks/default'\n nodes = neo4j_session.run(\n query,\n VpcId=expected_vpc_id,\n )\n actual_nodes = {(n['vpc.id'], n['vpc.partial_uri'], n['vpc.auto_create_subnetworks']) for n in nodes}\n expected_nodes = {\n (expected_vpc_id, expected_vpc_id, True),\n }\n assert actual_nodes == expected_nodes", "def get_vpc_gcp_info_output(instance_id: Optional[pulumi.Input[Optional[int]]] = None,\n vpc_id: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVpcGcpInfoResult]:\n ...", "def add_vpc(template, cidr):\n global num_vpcs\n num_vpcs += 1\n vpc_title = \"VPC\" + str(num_vpcs)\n\n vpc = template.add_resource(ec2.VPC(vpc_title,\n CidrBlock=cidr,\n Tags=Tags(Name=name_tag(vpc_title),\n Environment=ENVIRONMENT_NAME)))\n return vpc", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def vpc_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vpc_id\")", "def __setup_template(self):\n template = Template()\n template.add_description(\"Service VPC - used for services\")\n\n template.add_metadata({\n \"Build\": \"development\",\n \"DependsOn\": [],\n \"Environment\": \"ApiDev\",\n \"Revision\": \"develop\",\n \"StackName\": \"ApiDev-Dev-VPC\",\n \"StackType\": \"InfrastructureResource\",\n \"TemplateBucket\": \"cfn-apidev\",\n \"TemplateName\": \"VPC\",\n \"TemplatePath\": \"ApiDev/Dev/VPC\"\n })\n\n vpc = template.add_resource(\n ec2.VPC(\n \"VPC\",\n CidrBlock=\"10.0.0.0/16\",\n EnableDnsHostnames=\"true\",\n EnableDnsSupport=\"true\",\n InstanceTenancy=\"default\",\n Tags=self.__get_tags(\"ServiceVPC\"),\n )\n )\n\n instance_sg = template.add_resource(\n ec2.SecurityGroup(\n \"BastionSG\",\n GroupDescription=\"Used for source/dest rules\",\n Tags=self.__get_tags(\"VPC-Bastion-SG\"),\n VpcId=Ref(\n vpc\n )\n ),\n )\n\n cw_alarm_topic = template.add_resource(\n Topic(\n \"CloudWatchAlarmTopic\",\n TopicName=\"ApiDev-Dev-CloudWatchAlarms\",\n )\n )\n\n dhcp_options = template.add_resource(\n ec2.DHCPOptions(\n \"DhcpOptions\",\n DomainName=Join(\n \"\",\n [\n Ref(\"AWS::Region\"),\n \".compute.internal\"\n ]\n ),\n DomainNameServers=[\"AmazonProvidedDNS\"],\n Tags=self.__get_tags(\"DhcpOptions\"),\n )\n )\n\n gateway = template.add_resource(\n ec2.InternetGateway(\n \"InternetGateway\",\n Tags=self.__get_tags(\"InternetGateway\")\n )\n )\n\n nat_emergency_topic = template.add_resource(\n Topic(\n \"NatEmergencyTopic\",\n TopicName=\"ApiDev-Dev-NatEmergencyTopic\",\n )\n )\n\n vpc_dhcp_options_assoc = template.add_resource(\n ec2.VPCDHCPOptionsAssociation(\n \"VpcDhcpOptionsAssociation\",\n DhcpOptionsId=Ref(\n dhcp_options\n ),\n VpcId=Ref(\n vpc\n )\n )\n )\n\n vpc_gw_attachment = template.add_resource(\n ec2.VPCGatewayAttachment(\n \"VpcGatewayAttachment\",\n InternetGatewayId=Ref(\n gateway\n ),\n VpcId=Ref(\n vpc\n )\n )\n )\n\n vpc_network_acl = template.add_resource(\n ec2.NetworkAcl(\n \"VpcNetworkAcl\",\n Tags=self.__get_tags(\"NetworkAcl\"),\n VpcId=Ref(\n vpc\n )\n )\n )\n\n vpc_network_acl_rules = template.add_resource([\n ec2.NetworkAclEntry(\n \"VpcNetworkAclInboundRulePublic443\",\n CidrBlock=\"0.0.0.0/0\",\n Egress=\"false\",\n NetworkAclId=Ref(\n vpc_network_acl\n ),\n PortRange=ec2.PortRange(\n From=\"443\",\n To=\"443\",\n ),\n Protocol=\"6\",\n RuleAction=\"allow\",\n RuleNumber=20001\n ),\n ec2.NetworkAclEntry(\n \"VpcNetworkAclInboundRulePublic80\",\n CidrBlock=\"0.0.0.0/0\",\n Egress=\"false\",\n NetworkAclId=Ref(\n vpc_network_acl\n ),\n PortRange=ec2.PortRange(\n From=\"80\",\n To=\"80\",\n ),\n Protocol=\"6\",\n RuleAction=\"allow\",\n RuleNumber=20000\n ),\n ec2.NetworkAclEntry(\n \"VpcNetworkAclOutboundRule\",\n CidrBlock=\"0.0.0.0/0\",\n 
Egress=\"true\",\n NetworkAclId=Ref(\n vpc_network_acl\n ),\n Protocol=\"-1\",\n RuleAction=\"allow\",\n RuleNumber=30000\n ),\n ec2.NetworkAclEntry(\n \"VpcNetworkAclSsh\",\n CidrBlock=\"127.0.0.1/32\",\n Egress=\"false\",\n NetworkAclId=Ref(\n vpc_network_acl\n ),\n PortRange=ec2.PortRange(\n From=\"22\",\n To=\"22\",\n ),\n Protocol=\"6\",\n RuleAction=\"allow\",\n RuleNumber=10000\n )\n ])\n\n template.add_output([\n Output(\n \"BastionSG\",\n Value=Ref(instance_sg)\n ),\n Output(\n \"CloudWatchAlarmTopic\",\n Value=Ref(cw_alarm_topic)\n ),\n Output(\n \"InternetGateway\",\n Value=Ref(gateway)\n ),\n Output(\n \"NatEmergencyTopicARN\",\n Value=Ref(nat_emergency_topic)\n ),\n Output(\n \"VPCID\",\n Value=Ref(vpc)\n ),\n Output(\n \"VPCName\",\n Value=Ref(\"AWS::StackName\")\n ),\n Output(\n \"VpcNetworkAcl\",\n Value=Ref(vpc_network_acl)\n )\n\n ])\n\n return template", "def vpc_config(self) -> pulumi.Output[Optional['outputs.CanaryVpcConfig']]:\n return pulumi.get(self, \"vpc_config\")", "def create_vpc ( vpc_conn,\n ec2_conn,\n cloudwatch_conn,\n r53_conn,\n aws_account_type,\n region_name,\n base_topicarn,\n base_name,\n params ) :\n nat_subnet = None\n \n # Create VPC\n vpc_name = get_vpc_name( base_name )\n print \"Creating VPC with name \" + vpc_name\n vpc = vpc_conn.create_vpc('10.0.0.0/16')\n \n print \"Waiting for VPC to be created\"\n aws_wait( vpc_conn.get_all_vpcs, vpc.id )\n aws_cmd( ec2_conn.create_tags, [ vpc.id, { \"Name\": vpc_name } ] )\n vpc_conn.modify_vpc_attribute( vpc_id = vpc.id, enable_dns_support = True )\n vpc_conn.modify_vpc_attribute( vpc_id = vpc.id, enable_dns_hostnames = True )\n \n # Create Internate Gateway and attache to new VPC\n print \"Creating Internet Gateway and attaching to new VPC\"\n ig_name = get_ig_name( base_name )\n igw = vpc_conn.create_internet_gateway( )\n aws_wait( vpc_conn.get_all_internet_gateways, igw.id )\n vpc_conn.attach_internet_gateway( igw.id, vpc.id )\n aws_cmd( ec2_conn.create_tags, [ igw.id, { \"Name\": ig_name } ] )\n \n # Create public Route table \n print \"Creating Public Route table and attaching to new VPC\"\n public_rt_name = get_rt_name (base_name, 'PUBLIC')\n public_rt = vpc_conn.create_route_table( vpc.id )\n aws_wait( vpc_conn.get_all_route_tables, public_rt.id )\n aws_cmd( ec2_conn.create_tags, [ public_rt.id, { \"Name\": public_rt_name } ] )\n \n # Create private Route table\n print \"Creating Private Route table and attaching to new VPC\"\n private_rt_name = get_rt_name (base_name, 'PRIVATE')\n private_rt = vpc_conn.create_route_table( vpc.id )\n aws_wait( vpc_conn.get_all_route_tables, private_rt.id )\n aws_cmd( ec2_conn.create_tags, [ private_rt.id, { \"Name\": private_rt_name } ] )\n \n # Create route for everything through the igw\n print \"Attaching new Internet Gateway to the public routing table\"\n all_cidr = \"0.0.0.0/0\"\n vpc_conn.create_route( public_rt.id, all_cidr, gateway_id = igw.id )\n\n # Get the list of all the zones\n zones = ec2_conn.get_all_zones( )\n zone_mappings = {}\n print \"Creating public, private, and static subnets for each zone.\"\n \n idx = 0 \n zone_static_subnets = subnet_cidrs[ 'STATIC' ]\n for zone in zones:\n zm = zone_mapping( )\n zm.zone_letter = zone.name[-1:].upper( )\n zm.public_subnet = create_subnet( vpc_conn,\n ec2_conn,\n vpc.id,\n zone_public_subnet_map[ zm.zone_letter ],\n zone.name,\n base_name + \"-PUBLIC\" )\n \n zm.private_subnet = create_subnet( vpc_conn,\n ec2_conn,\n vpc.id,\n zone_private_subnet_map[ zm.zone_letter ],\n zone.name,\n base_name + 
\"-PRIVATE\" )\n \n zm.static_subnet = create_subnet( vpc_conn,\n ec2_conn,\n vpc.id,\n zone_static_subnets[ idx ],\n zone.name,\n base_name + \"-STATIC\" )\n idx = idx + 1\n\n # Map the public subnet to the public routing table.\n vpc_conn.associate_route_table( public_rt.id, zm.public_subnet.id )\n \n # Map the private subnet to the private routing table.\n vpc_conn.associate_route_table( private_rt.id, zm.private_subnet.id )\n \n # Map the static subnet to the private routing table.\n vpc_conn.associate_route_table( private_rt.id, zm.static_subnet.id )\n\n if not nat_subnet:\n nat_subnet = zm.public_subnet\n\n zone_mappings[ zone.name ] = zm\n \n nat = create_nat ( vpc_conn = vpc_conn,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n r53_conn = r53_conn,\n aws_account_type = aws_account_type,\n region_name = region_name,\n vpc = vpc,\n base_name = base_name,\n base_topicarn = base_topicarn,\n zone_mapping_list = [ zone_mappings[ key ] for key in zone_mappings ],\n private_rt = private_rt,\n nat_subnet = nat_subnet,\n secgrp_rules = nat_secgrp_rules,\n monitor_rules = nat_monitor_rules )\n return vpc", "def sg_lookup_all(session, vpc_id):\n if session is None:\n return NoneDict()\n\n client = session.client('ec2')\n response = client.describe_security_groups(Filters=[{\"Name\": \"vpc-id\", \"Values\": [vpc_id]}])\n\n if len(response['SecurityGroups']) == 0:\n return NoneDict()\n else:\n sgs = NoneDict()\n for sg in response['SecurityGroups']:\n key = _find(sg.get('Tags', []), lambda x: x[\"Key\"] == \"Name\")\n if key:\n key = key['Value']\n sgs[key] = sg['GroupId']\n\n return sgs", "def vpc_config(self) -> Optional[pulumi.Input['CanaryVpcConfigArgs']]:\n return pulumi.get(self, \"vpc_config\")", "def get_aws(verbosity, resultset, providerversion):\n try:\n response = requests.get(AWSAPIURL)\n if verbosity:\n print(response.status_code)\n if response.status_code == 200:\n cidrdata = json.loads(response.content)\n providerversion[\"AWS\"] = cidrdata[\"createDate\"]+\" \"+cidrdata[\"syncToken\"]\n for i in range(0, len(cidrdata[\"prefixes\"])):\n if cidrdata[\"prefixes\"][i][\"ip_prefix\"] not in resultset:\n resultset[cidrdata[\"prefixes\"][i][\"ip_prefix\"]] = \"AWS\"\n for i in range(0, len(cidrdata[\"ipv6_prefixes\"])):\n if cidrdata[\"ipv6_prefixes\"][i][\"ipv6_prefix\"] not in resultset:\n resultset[cidrdata[\"ipv6_prefixes\"][i][\"ipv6_prefix\"]] = \"AWS\"\n except Exception as get_exception:\n print(\"Exception\")\n print(get_exception)\n\n return resultset, providerversion", "def data_source_info(self) -> 'outputs.DatasourceResponse':\n return pulumi.get(self, \"data_source_info\")", "def __init__(__self__, *,\n vpc_id: pulumi.Input[str],\n zone_id: pulumi.Input[str],\n vpc_region: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"vpc_id\", vpc_id)\n pulumi.set(__self__, \"zone_id\", zone_id)\n if vpc_region is not None:\n pulumi.set(__self__, \"vpc_region\", vpc_region)", "def delete_vpc(self, vpc_dn):\n vpc_mo = self.moDir.lookupByDn(vpc_dn)\n # Filters all infraAccBndlGrp objectsin memory looking for the ones that\n # has the vpc name and then select the first in the list\n AccBndlGrp_mo = filter(lambda x: x.name == vpc_mo.name, self.moDir.query(ClassQuery('infraAccBndlGrp')))[0]\n # Delete policy group\n AccBndlGrp_mo.delete()\n self.commit(AccBndlGrp_mo)\n # Filters all infraAccPortP objects in memory looking for the ones that\n # has the vpc name\n AccPortP_mo_list = filter(\n lambda x: vpc_mo.name + VPC in x.name, 
self.moDir.query(ClassQuery('infraAccPortP'))\n )\n for AccPortP_mo in AccPortP_mo_list:\n # Delete interface profiles\n AccPortP_mo.delete()\n self.commit(AccPortP_mo)\n\n # Filters all infraNodeP objects in memory looking for the ones that\n # has the vpc name\n NodeP_mo_list = filter(\n lambda x: vpc_mo.name in x.name + VPC, self.moDir.query(ClassQuery('infraNodeP'))\n )\n for NodeP_mo in NodeP_mo_list:\n # Delete switch profile\n NodeP_mo.delete()\n self.commit(NodeP_mo)", "def get_vpc_gcp_info(instance_id: Optional[int] = None,\n vpc_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVpcGcpInfoResult:\n __args__ = dict()\n __args__['instanceId'] = instance_id\n __args__['vpcId'] = vpc_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('cloudamqp:index/getVpcGcpInfo:getVpcGcpInfo', __args__, opts=opts, typ=GetVpcGcpInfoResult).value\n\n return AwaitableGetVpcGcpInfoResult(\n id=__ret__.id,\n instance_id=__ret__.instance_id,\n name=__ret__.name,\n network=__ret__.network,\n vpc_id=__ret__.vpc_id,\n vpc_subnet=__ret__.vpc_subnet)", "def create_vpc(ec2):\n # create a new VPC\n print(\"\\n===Creating VPC...\")\n vpc = ec2.create_vpc(CidrBlock=VPC_CIDR_BLOCK,\n TagSpecifications=[{\"ResourceType\": \"vpc\",\n \"Tags\":[{\"Key\": \"Name\", \n \"Value\": VPC_NAME},\n ]\n }])\n \n # wait till available and return VPC ID\n vpc.wait_until_available()\n print(f\"===VPC {VPC_NAME} is available!\")\n return vpc", "def vpc_region(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"vpc_region\")", "def __init__(__self__, *,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vpc_region: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None):\n if vpc_id is not None:\n pulumi.set(__self__, \"vpc_id\", vpc_id)\n if vpc_region is not None:\n pulumi.set(__self__, \"vpc_region\", vpc_region)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)", "def _get_vpc_id(tag_prefix, ec2_client=None, region_name=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n vpc_id = None\n vpc_cidr = None\n LOGGER.debug(\"ec2_client.describe_vpcs(Filters=[{'Name': 'tag:Prefix',\"\\\n \" 'Values': ['%s']}])\", tag_prefix)\n resp = ec2_client.describe_vpcs(\n Filters=[{'Name': 'tag:Prefix', 'Values': [tag_prefix]}])\n if resp['Vpcs']:\n vpc_data = resp['Vpcs'][0]\n vpc_id = vpc_data['VpcId']\n vpc_cidr = vpc_data['CidrBlock']\n LOGGER.info(\"%s found VPC %s covering cidr block %s\",\n tag_prefix, vpc_id, vpc_cidr)\n return vpc_id, vpc_cidr", "def vpc_cleanup(vpcid, region):\n logging.info(f'Removing VPC __{vpcid}__ from AWS')\n ec2_resource = boto3.resource('ec2', region_name=region)\n ec2_client = ec2_resource.meta.client\n vpc = ec2_resource.Vpc(vpcid)\n #pdb.set_trace()\n try:\n for subnet in vpc.subnets.all():\n for instance in subnet.instances.all():\n try:\n logging.info(f\"Terminating the instance __{instance.id}__ associated with the vpc __{vpc.id}__\")\n instance.terminate()\n logging.info(\"Waiting for 60 seconds to let the instance be terminated\")\n sleep(60)\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n\n for subnet in vpc.subnets.all():\n for interface in subnet.network_interfaces.all():\n try:\n logging.info(f\"Deleting the ENI __{interface.id}__ associated with the vpc __{vpc.id}__\")\n interface.delete()\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n try:\n 
logging.info(f\"Deleting the ENI __{subnet.id}__ associated with the vpc __{vpcid}__\")\n subnet.delete()\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n\n for gw in vpc.internet_gateways.all():\n try:\n logging.info(f\"detaching and deleting Internet Gateway __{gw.id}__ associated with the vpc __{vpc.id}__\")\n vpc.detach_internet_gateway(InternetGatewayId=gw.id)\n gw.delete()\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n\n for requested_peer in vpc.requested_vpc_peering_connections.all():\n try:\n logging.info(f\"Deleting VPC Peering Connection __{requested_peer.id}__ associated with the vpc __{vpc.id}__\")\n requested_peer.delete()\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n\n for accepted_peer in vpc.accepted_vpc_peering_connections.all():\n try:\n logging.info(f\"Deleting VPC Peering Connection __{accepted_peer.id}__ associated with the vpc __{vpc.id}__\")\n accepted_peer.delete()\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n\n for sg in vpc.security_groups.all():\n if sg.group_name != 'default':\n try:\n logging.info(f\"Deleting the Security Group __{sg.id}__ associated with the vpc __{vpc.id}__\")\n sg.delete()\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n\n route_tables = vpc.route_tables.all()\n for route_table in route_tables:\n for route in route_table.routes:\n if route.origin == 'CreateRoute':\n try:\n logging.info(f\"Deleting the routes from __{route_table.id}__ of vpc __{vpc.id}__\")\n print(route)\n route.delete()\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n for association in route_table.associations:\n if not association.main:\n try:\n logging.info(f\"Disassociating and Deleting the non-main route table __{route_table.id}__ of the vpc __{vpc.id}__\")\n association.delete()\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n for route_table in route_tables:\n if not route_table.associations: # If Empty\n try:\n logging.info(f\"Deleting the route table __{route_table.id}__ of vpc __{vpc.id}__\")\n route_table.delete()\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n\n for endpoint in ec2_client.describe_vpc_endpoints(Filters=[{\n 'Name': 'vpc-id',\n 'Values': [vpcid]\n }])['VpcEndpoints']:\n try:\n logging.info(f\"Deleting the VPC Endpoint __{endpoint['VpcEndpointId']}__ of the vpc __{vpc.id}__\")\n ec2_client.delete_vpc_endpoints(VpcEndpointIds=[endpoint['VpcEndpointId']])\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n\n try:\n logging.info(f\"All the Dependencies of the VPC __{vpc.id}__ are being removed. Proceeding on to Deleting the VPC\")\n ec2_client.delete_vpc(VpcId=vpc.id)\n except ClientError as e:\n logging.error(e.response['Error']['Message'])\n except ClientError as e:\n logging.error(e.response['Error']['Message'])", "def list_instances_detail(self):\n\n # TODO(imsplitbit): need to ask around if this is the best way to do\n # this. This causes some redundant vzlist commands as get_info is run\n # on every item returned from this command but it didn't make sense\n # to re-implement get_info as get_info_all.\n infos = []\n try:\n # get a list of CT names which will be nova friendly.\n # NOTE: This can be an issue if nova decides to change\n # the format of names. 
We would need to have a migration process\n # to change the names in the name field of the CTs.\n out, err = utils.execute('sudo', 'vzlist', '--all', '-o',\n 'name', '-H')\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Problem listing Vzs')\n\n for name in out.splitlines():\n name = name.split()[0]\n status = self.get_info(name)\n infos.append(driver.InstanceInfo(name, status['state']))\n\n return infos", "def seed_aws_data(ctx, data):\n swag = create_swag_from_ctx(ctx)\n for k, v in json.loads(data.read()).items():\n for account in v['accounts']:\n data = {\n 'description': 'This is an AWS owned account used for {}'.format(k),\n 'id': account['account_id'],\n 'contacts': [],\n 'owner': 'aws',\n 'provider': 'aws',\n 'sensitive': False,\n 'email': 'support@amazon.com',\n 'name': k + '-' + account['region']\n }\n\n click.echo(click.style(\n 'Seeded Account. AccountName: {}'.format(data['name']), fg='green')\n )\n\n swag.create(data)", "def aws_elasticsearch_in_vpc_check(cache: dict, session, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:\n # ISO Time\n iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()\n for response in describe_es_os_domains(cache, session):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(response,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n esDomainName = response[\"DomainStatus\"][\"DomainName\"]\n esVersion = response[\"DomainStatus\"][\"ElasticsearchVersion\"]\n domainId = response[\"DomainStatus\"][\"DomainId\"]\n domainArn = response[\"DomainStatus\"][\"ARN\"]\n try:\n vpcId = str(response[\"VPCOptions\"][\"VPCId\"])\n except KeyError:\n vpcId = \"NO_VPC\"\n # This is a failing check\n if vpcId == \"NO_VPC\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{domainArn}/elasticsearch-in-vpc-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": domainArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": \"[OpenSearch.8] OpenSearch/AWS ElasticSearch Service domains should be in a VPC\",\n \"Description\": \"OpenSearch/AWS ElasticSearch Service domain \"\n + esDomainName\n + \" is not in a VPC, Placing an Amazon ES domain within a VPC enables secure communication between Amazon ES and other services within the VPC without the need for an internet gateway, NAT device, or VPN connection. All traffic remains securely within the AWS Cloud. Because of their logical isolation, domains that reside within a VPC have an extra layer of security when compared to domains that use public endpoints. 
Refer to the remediation instructions if this configuration is not intended.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For information on placing Domains in a VPC refer to the Launching your Amazon OpenSearch/AWS ElasticSearch Service domains using a VPC section of the Amazon Elasticsearch Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon OpenSearch Service\",\n \"AssetComponent\": \"Search Domain\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsOpenSearchServiceDomain\",\n \"Id\": domainArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsOpenSearchServiceDomain\": {\n \"Id\": domainId,\n \"DomainName\": esDomainName,\n \"EngineVersion\": esVersion\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST CSF V1.1 PR.AC-4\",\n \"NIST CSF V1.1 PR.DS-5\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AC-5\",\n \"NIST SP 800-53 Rev. 4 AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-14\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 PE-19\",\n \"NIST SP 800-53 Rev. 4 PS-3\",\n \"NIST SP 800-53 Rev. 4 PS-6\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 4 SC-13\",\n \"NIST SP 800-53 Rev. 4 SC-15\",\n \"NIST SP 800-53 Rev. 4 SC-31\",\n \"NIST SP 800-53 Rev. 
4 SI-4\",\n \"AICPA TSC CC6.3\",\n \"AICPA TSC CC6.6\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.6.1.2\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.7.1.2\",\n \"ISO 27001:2013 A.7.3.1\",\n \"ISO 27001:2013 A.8.2.2\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.9.1.1\",\n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.2.3\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.10.1.1\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.11.1.5\",\n \"ISO 27001:2013 A.11.2.1\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.1.3\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.13.2.4\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{domainArn}/elasticsearch-in-vpc-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": domainArn,\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices\",\n \"Effects/Data Exposure\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": \"[OpenSearch.8] OpenSearch/AWS ElasticSearch Service domains should be in a VPC\",\n \"Description\": \"OpenSearch/AWS ElasticSearch Service domain \"\n + esDomainName\n + \" is in a VPC.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"For information on placing Domains in a VPC refer to the Launching your Amazon OpenSearch/AWS ElasticSearch Service domains using a VPC section of the Amazon Elasticsearch Service Developer Guide\",\n \"Url\": \"https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"AWS\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": awsAccountId,\n \"AssetRegion\": awsRegion,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Analytics\",\n \"AssetService\": \"Amazon OpenSearch Service\",\n \"AssetComponent\": \"Search Domain\"\n },\n \"Resources\": [\n {\n \"Type\": \"AwsOpenSearchServiceDomain\",\n \"Id\": domainArn,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"AwsOpenSearchServiceDomain\": {\n \"Id\": domainId,\n \"DomainName\": esDomainName,\n \"EngineVersion\": esVersion\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST CSF V1.1 PR.AC-4\",\n \"NIST CSF V1.1 PR.DS-5\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-2\",\n \"NIST SP 800-53 Rev. 4 AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-4\",\n \"NIST SP 800-53 Rev. 4 AC-5\",\n \"NIST SP 800-53 Rev. 4 AC-6\",\n \"NIST SP 800-53 Rev. 4 AC-14\",\n \"NIST SP 800-53 Rev. 4 AC-16\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 4 AC-24\",\n \"NIST SP 800-53 Rev. 4 PE-19\",\n \"NIST SP 800-53 Rev. 4 PS-3\",\n \"NIST SP 800-53 Rev. 4 PS-6\",\n \"NIST SP 800-53 Rev. 4 SC-7\",\n \"NIST SP 800-53 Rev. 4 SC-8\",\n \"NIST SP 800-53 Rev. 
4 SC-13\",\n \"NIST SP 800-53 Rev. 4 SC-15\",\n \"NIST SP 800-53 Rev. 4 SC-31\",\n \"NIST SP 800-53 Rev. 4 SI-4\",\n \"AICPA TSC CC6.3\",\n \"AICPA TSC CC6.6\",\n \"AICPA TSC CC7.2\",\n \"ISO 27001:2013 A.6.1.2\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.7.1.1\",\n \"ISO 27001:2013 A.7.1.2\",\n \"ISO 27001:2013 A.7.3.1\",\n \"ISO 27001:2013 A.8.2.2\",\n \"ISO 27001:2013 A.8.2.3\",\n \"ISO 27001:2013 A.9.1.1\",\n \"ISO 27001:2013 A.9.1.2\",\n \"ISO 27001:2013 A.9.2.3\",\n \"ISO 27001:2013 A.9.4.1\",\n \"ISO 27001:2013 A.9.4.4\",\n \"ISO 27001:2013 A.9.4.5\",\n \"ISO 27001:2013 A.10.1.1\",\n \"ISO 27001:2013 A.11.1.4\",\n \"ISO 27001:2013 A.11.1.5\",\n \"ISO 27001:2013 A.11.2.1\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.1.3\",\n \"ISO 27001:2013 A.13.2.1\",\n \"ISO 27001:2013 A.13.2.3\",\n \"ISO 27001:2013 A.13.2.4\",\n \"ISO 27001:2013 A.14.1.2\",\n \"ISO 27001:2013 A.14.1.3\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def ssh_call_vpc ( ec2_conn, base_name, instances, cmd, halt_on_error = True ) :\n nat_instance = find_instance_by_type( ec2_conn, base_name, 'NAT' )\n nat_keyfile = os.environ[ 'HOME' ] + '/.ssh/' + nat_instance.key_name + '.pem'\n call_nat = False\n error_occurred = False\n for instance in instances :\n if instance.id == nat_instance.id :\n call_nat = True\n continue\n instance_keyfile = '.ssh/' + instance.key_name + '.pem'\n retval = ssh_call( nat_instance.ip_address,\n nat_keyfile,\n \"ssh -tt \" + ssh_opt + \" -i \" + instance_keyfile + \" \" + instance.private_ip_address + \" '\" + cmd + \"'\" )\n if retval != 0 :\n print \"WARNING: ssh call resulted in error code: \" + str( retval )\n error_occurred = True\n if halt_on_error :\n break\n\n if not error_occurred or not halt_on_error :\n if call_nat :\n retval = ssh_call( nat_instance.ip_address,\n nat_keyfile,\n cmd )\n if retval != 0 :\n print \"WARNING: ssh call resulted in error code: \" + str( retval )\n error_occurred = True\n\n return error_occurred", "def get_vpc_name(vpc):\n name = \"\"\n for i in vpc.tags:\n if 'Key' in i.keys() and i['Key'] == 'Name':\n name = i['Value']\n return name", "def vpc_id_lookup(session, vpc_domain):\n if session is None:\n return None\n\n client = session.client('ec2')\n response = client.describe_vpcs(Filters=[{\"Name\": \"tag:Name\", \"Values\": [vpc_domain]}])\n if len(response['Vpcs']) == 0:\n return None\n else:\n return response['Vpcs'][0]['VpcId']", "def ensure_vpc_created():\n response = EC2Client.describe_vpcs(Filters=[\n { 'Name': 'cidr', 'Values': [ Constants['VpcCidr'] ] },\n ])\n\n if len(response['Vpcs']) != 1:\n # untested\n vpc = EC2Resource.create_vpc(\n CidrBlock=Constants['VpcCidr'],\n AmazonProvidedIpv6CidrBlock=True\n )\n else:\n # usual case: VPC already exists\n vpc_id = response['Vpcs'][0]['VpcId']\n vpc = EC2Resource.Vpc(vpc_id)\n\n vpc.create_tags(Tags=[\n { 'Key': 'Name', 'Value': Constants['VpcName'] },\n ])\n\n vpc.wait_until_available()\n\n return vpc", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vpc_region: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def setup_aws_data():\n\n target_project_arn = setup_project(PROJECT_NAME)\n device_pool_arn = setup_device_pool(target_project_arn, DEVICE_POOL_NAME)\n get_device_info(target_project_arn)", "def 
set_vpc_properties(ec2_client, vpc_id):\n try:\n vpc = ec2_client.Vpc(id=vpc_id)\n default_security_group = list(vpc.security_groups.all())[0]\n default_security_group.authorize_ingress(\n GroupName=default_security_group.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(PORT),\n ToPort=int(PORT)\n )\n except Exception as e:\n print(e)", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "async def fetch_currencies(self, params={}):\n response = await self.publicGetWalletAssets(params)\n #\n # {\n # \"XBt\": {\n # \"asset\": \"XBT\",\n # \"currency\": \"XBt\",\n # \"majorCurrency\": \"XBT\",\n # \"name\": \"Bitcoin\",\n # \"currencyType\": \"Crypto\",\n # \"scale\": \"8\",\n # # \"mediumPrecision\": \"8\",\n # # \"shorterPrecision\": \"4\",\n # # \"symbol\": \"₿\",\n # # \"weight\": \"1\",\n # # \"tickLog\": \"0\",\n # \"enabled\": True,\n # \"isMarginCurrency\": True,\n # \"minDepositAmount\": \"10000\",\n # \"minWithdrawalAmount\": \"1000\",\n # \"maxWithdrawalAmount\": \"100000000000000\",\n # \"networks\": [\n # {\n # \"asset\": \"btc\",\n # \"tokenAddress\": \"\",\n # \"depositEnabled\": True,\n # \"withdrawalEnabled\": True,\n # \"withdrawalFee\": \"20000\",\n # \"minFee\": \"20000\",\n # \"maxFee\": \"10000000\"\n # }\n # ]\n # },\n # }\n #\n result = {}\n for i in range(0, len(response)):\n currency = response[i]\n asset = self.safe_string(currency, 'asset')\n code = self.safe_currency_code(asset)\n id = self.safe_string(currency, 'currency')\n name = self.safe_string(currency, 'name')\n chains = self.safe_value(currency, 'networks', [])\n depositEnabled = False\n withdrawEnabled = False\n networks = {}\n scale = self.safe_string(currency, 'scale')\n precisionString = self.parse_precision(scale)\n precision = self.parse_number(precisionString)\n for j in range(0, len(chains)):\n chain = chains[j]\n networkId = self.safe_string(chain, 'asset')\n network = self.network_id_to_code(networkId)\n withdrawalFeeRaw = self.safe_string(chain, 'withdrawalFee')\n withdrawalFee = self.parse_number(Precise.string_mul(withdrawalFeeRaw, precisionString))\n isDepositEnabled = self.safe_value(chain, 'depositEnabled', False)\n isWithdrawEnabled = self.safe_value(chain, 'withdrawalEnabled', False)\n active = (isDepositEnabled and isWithdrawEnabled)\n if isDepositEnabled:\n depositEnabled = True\n if isWithdrawEnabled:\n withdrawEnabled = True\n networks[network] = {\n 'info': chain,\n 'id': networkId,\n 'network': network,\n 'active': active,\n 'deposit': isDepositEnabled,\n 'withdraw': isWithdrawEnabled,\n 'fee': withdrawalFee,\n 'precision': None,\n 'limits': {\n 'withdraw': {\n 'min': None,\n 'max': None,\n },\n 'deposit': {\n 'min': None,\n 'max': None,\n },\n },\n }\n currencyEnabled = self.safe_value(currency, 'enabled')\n currencyActive = currencyEnabled or (depositEnabled or withdrawEnabled)\n minWithdrawalString = self.safe_string(currency, 'minWithdrawalAmount')\n minWithdrawal = self.parse_number(Precise.string_mul(minWithdrawalString, precisionString))\n maxWithdrawalString = self.safe_string(currency, 'maxWithdrawalAmount')\n maxWithdrawal = self.parse_number(Precise.string_mul(maxWithdrawalString, precisionString))\n minDepositString = self.safe_string(currency, 'minDepositAmount')\n minDeposit = self.parse_number(Precise.string_mul(minDepositString, precisionString))\n result[code] = {\n 'id': id,\n 'code': code,\n 'info': 
currency,\n 'name': name,\n 'active': currencyActive,\n 'deposit': depositEnabled,\n 'withdraw': withdrawEnabled,\n 'fee': None,\n 'precision': precision,\n 'limits': {\n 'amount': {\n 'min': None,\n 'max': None,\n },\n 'withdraw': {\n 'min': minWithdrawal,\n 'max': maxWithdrawal,\n },\n 'deposit': {\n 'min': minDeposit,\n 'max': None,\n },\n },\n 'networks': networks,\n }\n return result", "def aws():\n pass", "def __init__(__self__, *,\n vpc_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n ipv6_gateway_name: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n spec: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n pulumi.set(__self__, \"vpc_id\", vpc_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if ipv6_gateway_name is not None:\n pulumi.set(__self__, \"ipv6_gateway_name\", ipv6_gateway_name)\n if resource_group_id is not None:\n pulumi.set(__self__, \"resource_group_id\", resource_group_id)\n if spec is not None:\n warnings.warn(\"\"\"Field 'Spec' has been deprecated from provider version 1.205.0. IPv6 gateways do not distinguish between specifications. This parameter is no longer used.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"spec is deprecated: Field 'Spec' has been deprecated from provider version 1.205.0. IPv6 gateways do not distinguish between specifications. This parameter is no longer used.\"\"\")\n if spec is not None:\n pulumi.set(__self__, \"spec\", spec)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)", "def get_zone_output(name: Optional[pulumi.Input[Optional[str]]] = None,\n private_zone: Optional[pulumi.Input[Optional[bool]]] = None,\n resource_record_set_count: Optional[pulumi.Input[Optional[int]]] = None,\n tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,\n vpc_id: Optional[pulumi.Input[Optional[str]]] = None,\n zone_id: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetZoneResult]:\n ...", "def delete_vpc(self, vpc_id, region):\n logger.debug(f\"Deleting vpc: {vpc_id}\")\n tmp_vpc_client = ibm.client(region=region)\n vpc_data = self.get_vpc_data(vpc_id, region)\n if not vpc_data:\n logger.warn(f\"vpc:{vpc_id} is set for deletion, but wasn't found\")\n return None\n self.delete_vms(tmp_vpc_client, vpc_id)\n self.delete_subnets(tmp_vpc_client, vpc_data, region)\n self.delete_gateways(tmp_vpc_client, vpc_id)\n # at this point vpc was already verified to be existing\n # thus no relevant exception to catch when deleting.\n tmp_vpc_client.delete_vpc(vpc_id)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n address_family: Optional[pulumi.Input[str]] = None,\n allocation_default_netmask_length: Optional[pulumi.Input[int]] = None,\n allocation_max_netmask_length: Optional[pulumi.Input[int]] = None,\n allocation_min_netmask_length: Optional[pulumi.Input[int]] = None,\n allocation_resource_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auto_import: Optional[pulumi.Input[bool]] = None,\n aws_service: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n ipam_scope_id: Optional[pulumi.Input[str]] = None,\n ipam_scope_type: Optional[pulumi.Input[str]] = None,\n locale: Optional[pulumi.Input[str]] = None,\n pool_depth: Optional[pulumi.Input[int]] = None,\n 
public_ip_source: Optional[pulumi.Input[str]] = None,\n publicly_advertisable: Optional[pulumi.Input[bool]] = None,\n source_ipam_pool_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'VpcIpamPool':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _VpcIpamPoolState.__new__(_VpcIpamPoolState)\n\n __props__.__dict__[\"address_family\"] = address_family\n __props__.__dict__[\"allocation_default_netmask_length\"] = allocation_default_netmask_length\n __props__.__dict__[\"allocation_max_netmask_length\"] = allocation_max_netmask_length\n __props__.__dict__[\"allocation_min_netmask_length\"] = allocation_min_netmask_length\n __props__.__dict__[\"allocation_resource_tags\"] = allocation_resource_tags\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auto_import\"] = auto_import\n __props__.__dict__[\"aws_service\"] = aws_service\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"ipam_scope_id\"] = ipam_scope_id\n __props__.__dict__[\"ipam_scope_type\"] = ipam_scope_type\n __props__.__dict__[\"locale\"] = locale\n __props__.__dict__[\"pool_depth\"] = pool_depth\n __props__.__dict__[\"public_ip_source\"] = public_ip_source\n __props__.__dict__[\"publicly_advertisable\"] = publicly_advertisable\n __props__.__dict__[\"source_ipam_pool_id\"] = source_ipam_pool_id\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return VpcIpamPool(resource_name, opts=opts, __props__=__props__)", "def do_instance_list(cs, args):\n instances = cs.instances.list()\n\n fields = [\"OCCI ID\"]\n if args.detailed:\n fields.extend([\"Name\", \"State\", \"Network\"])\n occi_attrs = (\"occi.compute.hostname\",\n \"occi.compute.state\")\n\n pt = prettytable.PrettyTable([f for f in fields], caching=False)\n pt.align = 'l'\n\n for instance in instances:\n row = []\n attrs = instance.get('attributes', {})\n instance_id = attrs.get('occi.core.id', None)\n row.append(instance_id)\n\n if args.detailed and instance_id:\n if not all([i in attrs for i in occi_attrs]):\n instance = cs.instances.detail(instance_id)\n attrs = instance.get('attributes', {})\n\n name = attrs.get(\"occi.core.title\", None)\n if name is None:\n name = attrs.get(\"occi.compute.hostname\", None)\n row.append(name)\n row.append(attrs.get(\"occi.compute.state\", None))\n\n links = instance.get(\"links\", [])\n network = []\n for link in links:\n if occi.CATEGORIES[\"network\"] in link[\"kind\"][\"related\"]:\n # get IPv4\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.address\",\n None\n )\n if not ip:\n ip = link[\"attributes\"].get(\n \"occi.networkinterface.ip6\",\n None\n )\n network.append(ip)\n row.append(network)\n\n pt.add_row(row)\n\n print(pt.get_string())", "def get_sample_via_data(self, ctx, params):\n # ctx is the context object\n # return variables are: sample\n #BEGIN get_sample_via_data\n upa = _get_upa_from_object(params)\n sid, ver = _get_sample_address_from_object(params, version_required=True)\n sample = self._samples.get_sample_via_data(\n _UserID(ctx[_CTX_USER]), upa, _SampleAddress(sid, ver))\n sample = _sample_to_dict(sample)\n #END get_sample_via_data\n\n # At some point might do deeper type checking...\n if not isinstance(sample, dict):\n raise ValueError('Method get_sample_via_data return value ' 
+\n 'sample is not type dict as required.')\n # return the results\n return [sample]", "def QueryAWSAccounts(self, parameters):\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/cloud-connect-aws/QueryAWSAccounts \n FULL_URL = self.base_url+'/cloud-connect-aws/combined/accounts/v1'\n HEADERS = self.headers\n PARAMS = parameters\n result = self.Result()\n try:\n response = requests.request(\"GET\", FULL_URL, params=PARAMS, headers=HEADERS, verify=False)\n returned = result(response.status_code, response.headers, response.json())\n except Exception as e:\n returned = result(500, {}, str(e))\n \n return returned", "def info(ctx: CLIContext) -> None:\n fields = [\n keypair_fields['user_id'],\n keypair_fields['full_name'],\n keypair_fields['access_key'],\n keypair_fields['secret_key'],\n keypair_fields['is_active'],\n keypair_fields['is_admin'],\n keypair_fields['created_at'],\n keypair_fields['last_used'],\n keypair_fields['resource_policy'],\n keypair_fields['rate_limit'],\n keypair_fields['concurrency_used'],\n ]\n with Session() as session:\n try:\n kp = session.KeyPair(session.config.access_key)\n item = kp.info(fields=fields)\n ctx.output.print_item(item, fields)\n except Exception as e:\n ctx.output.print_error(e)\n sys.exit(1)", "def _generate_rds_instances_and_sg(resource, session):\n for db_instance in resource.describe_db_instances()[\"DBInstances\"]:\n for security_group in db_instance[\"VpcSecurityGroups\"]:\n yield db_instance, security_group, _get_sg_name(security_group[\"VpcSecurityGroupId\"], session)", "def example_data():\n return [\n {'id': 'cc-by', 'title': {'en': 'Creative Commons Attribution'},\n 'type': 'licenses'},\n {'id': 'cc0', 'title': {'en': 'Creative Commons Zero'},\n 'type': 'licenses'},\n ]", "def describe_rds_instances(rds, account, region, output_bucket):\n rds_list = rds.describe_db_instances().get('DBInstances')\n\n for rds_obj in rds_list:\n #print rds_obj\n output_bucket.append(misc.format_line((\n misc.check_if(account.get('name')),\n misc.check_if(region.get('RegionName')),\n misc.check_if(rds_obj.get('DBSubnetGroup').get('VpcId')),\n misc.check_if(rds_obj.get('DBInstanceIdentifier')),\n misc.check_if(rds_obj.get('DBInstanceClass')),\n misc.check_if(str(rds_obj.get('PubliclyAccessible'))),\n misc.check_if(rds_obj.get('Endpoint').get('Address')),\n misc.lookup(rds_obj.get('Endpoint').get('Address')),\n misc.check_if(str(rds_obj.get('Endpoint').get('Port')))\n )))", "def aws(ctx): # pylint: disable=unused-argument\n pass # pylint: disable=unnecessary-pass", "def show_instances(cls, args, config):\n instance_list = config.get_all_instances()\n if len(instance_list) > 0:\n table_data = []\n for i in instance_list:\n provider_obj = config.get_object_by_id(i.provider_id, 'Provider')\n if provider_obj is None:\n continue\n provider_name = provider_obj.name\n #print \"provider_obj.type\",provider_obj.type\n if i.worker_group_id is not None:\n name = config.get_object_by_id(i.worker_id, 'WorkerGroup').name\n itype = 'worker'\n else:\n name = config.get_object_by_id(i.controller_id, 'Controller').name\n itype = 'controller'\n table_data.append([i.id, provider_name, i.provider_instance_identifier, itype, name])\n table_print(['ID', 'provider', 'instance id', 'type', 'name'], table_data)\n else:\n print \"No instance found\"", "def get_vpc_subnets(self, vpc_data, region, field=\"\"):\n if not vpc_data:\n return None\n # pylint: disable=line-too-long\n tmp_vpc_client = ibm.client(region=region)\n subnets_attached_to_routing_table = 
tmp_vpc_client.list_subnets(\n routing_table_id=vpc_data[\"default_routing_table\"][\"id\"]\n ).get_result()[\"subnets\"]\n if field:\n return [subnet[field] for subnet in subnets_attached_to_routing_table]\n else:\n return subnets_attached_to_routing_table" ]
[ "0.6153667", "0.595063", "0.57215375", "0.5484518", "0.5472075", "0.54143125", "0.53245234", "0.5313841", "0.5285683", "0.52541167", "0.52541167", "0.52541167", "0.52541167", "0.52541167", "0.52541167", "0.5244572", "0.5241493", "0.5241493", "0.5241493", "0.5241493", "0.5241493", "0.5241493", "0.5241493", "0.5241493", "0.5241493", "0.52050024", "0.51860917", "0.5134325", "0.5125855", "0.5066078", "0.5066078", "0.5066078", "0.5066078", "0.5066078", "0.5066078", "0.5065736", "0.5038736", "0.5027225", "0.5019903", "0.50036365", "0.4970963", "0.4958274", "0.4928172", "0.49236163", "0.49126217", "0.49030325", "0.4903027", "0.4903027", "0.4903027", "0.4903027", "0.4903027", "0.4903027", "0.4903027", "0.4903027", "0.4903027", "0.4903027", "0.4903027", "0.4903027", "0.4901517", "0.48469946", "0.4844016", "0.47745812", "0.47632745", "0.47467288", "0.47421476", "0.46698853", "0.46693122", "0.46330443", "0.46299562", "0.46262994", "0.4605843", "0.4591404", "0.4574619", "0.45669526", "0.45415762", "0.45348588", "0.45237717", "0.45214802", "0.44948322", "0.44929406", "0.44929034", "0.44767717", "0.4472143", "0.4465713", "0.4452523", "0.44444644", "0.442963", "0.44230562", "0.44160715", "0.4399379", "0.4398617", "0.4393799", "0.43822074", "0.43780956", "0.43719813", "0.43624064", "0.43617502", "0.4354304", "0.43535456", "0.43453005" ]
0.56605554
3
>>> find_good_recipes(9, 10)
'5158916779'
>>> find_good_recipes(5, 10)
'0124515891'
>>> find_good_recipes(18, 10)
'9251071085'
>>> find_good_recipes(2018, 10)
'5941429882'
def find_good_recipes(improvement_num, count):
    recipes = [3, 7]
    elf1 = 0
    elf2 = 1
    while len(recipes) <= improvement_num + count:
        elf1_value = recipes[elf1]
        elf2_value = recipes[elf2]
        recipe_sum = elf1_value + elf2_value
        if recipe_sum > 9:
            recipe_string = f"{recipe_sum:02d}"
            recipes.append(int(recipe_string[:1]))
            recipes.append(int(recipe_string[1:]))
        else:
            recipes.append(recipe_sum)
        elf1 = loop_around(1 + elf1 + elf1_value, len(recipes))
        elf2 = loop_around(1 + elf2 + elf2_value, len(recipes))
    answer_string = ""
    for i in range(improvement_num, improvement_num + count):
        answer_string += str(recipes[i])
    return answer_string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_similar_recipes(self):\n pass", "def test_get_random_recipes(self):\n pass", "def measure_the_work(pattern_to_find):\n recipes = [3, 7]\n keys = [int(key) for key in pattern_to_find]\n elf1 = 0\n elf2 = 1\n not_found = True\n\n while not_found:\n elf1_value = recipes[elf1]\n elf2_value = recipes[elf2]\n\n recipe_sum = elf1_value + elf2_value\n\n if recipe_sum > 9:\n recipe_string = f\"{recipe_sum:02d}\"\n recipes.append(int(recipe_string[:1]))\n recipes.append(int(recipe_string[1:]))\n else:\n recipes.append(recipe_sum)\n\n elf1 = loop_around(1 + elf1 + elf1_value, len(recipes))\n elf2 = loop_around(1 + elf2 + elf2_value, len(recipes))\n if recipes[-1] == keys[-1] or recipes[-2] == keys[-1]:\n\n if pattern_to_find in ''.join(map(str, recipes[-(len(keys) + 2):])):\n not_found = False\n if recipes[-1] == keys[-1]:\n return len(recipes) - len(keys)\n else:\n return len(recipes) - len(keys) - 1", "def cakes(recipe, available):\n return min({k: available[k]//recipe[k] if k in available else 0 for k in recipe}.values())", "def search_recipe(ingredients):\n\n params = '+'.join(ingredients.split())\n url_search = SEARCH_URL.format(params)\n response = req.get(url_search)\n\n return response.content", "def test_search_recipes_by_nutrients(self):\n pass", "def test_search_recipes_by_ingredients(self):\n pass", "def find_string(n, c_length, start=None):\n \n c = range(c_length)\n if start is None:\n i = get_minimum(n)\n else:\n i = start\n\n strings = [e for e in generate_strings(n, c)]\n while True:\n for x, s in enumerate(generate_strings(i, c)):\n if check_string(s, strings):\n return s\n\n if x % 1000000 == 0:\n print x\n i += 1\n print \"processing %s\" % i", "def test_search_by_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['butter', 'sugar', 'eggs'])\n self.assertGreater(recipe_id, 0)", "def test_search_recipes(self):\n pass", "def check_recipes(self):\n\n self.recipe = None\n\n for recipe in all_recipes:\n if recipe.matches(self.crafting, self.crafting_stride):\n self.recipe = recipe", "async def get_recipes_from_components(\n fridge_components: dict, db_path: Path = DB_PATH\n) -> list:\n available_components = set(fridge_components.keys())\n logger.debug(\"Available components: {}\".format(available_components))\n\n # Updated counters of users' components\n for component in available_components:\n await execute_query(\n \"UPDATE components SET total_encountered = 1 + \"\n \"(SELECT total_encountered FROM components WHERE component = ?) 
\"\n \"WHERE component = ?\",\n (component, component),\n db_path=db_path,\n )\n logger.debug(\"Updated component counters of: {}\".format(available_components))\n\n recipes = await get_query_results(\n \"SELECT recipe_name, components FROM recipes\", db_path=db_path\n )\n\n # Select recipes that are possible to prepare with users' components\n selected_recipes = []\n for recipe in recipes:\n recipe_components = json.loads(recipe[1])\n recipe_components_names = set([x[\"item\"] for x in recipe_components])\n logger.debug(\n \"Recipe '{}' contains '{}'\".format(recipe[0], recipe_components_names)\n )\n\n # If user has all components of the recipe, find minimum amount that can be prepared\n minimum_quantity = 0\n if recipe_components_names.issubset(available_components):\n logger.debug(\n \"Recipe '{}' can be cooked with available components.\".format(recipe[0])\n )\n\n for components in recipe_components:\n available_quantity = fridge_components[components[\"item\"]]\n needed_quantity = components[\"q\"]\n\n if minimum_quantity:\n minimum_quantity = min(\n minimum_quantity, available_quantity / needed_quantity\n )\n else:\n # First cycle\n minimum_quantity = available_quantity / needed_quantity\n\n selected_recipes.append({\"name\": recipe[0], \"quantity\": minimum_quantity})\n\n selected_recipes_names = [x[\"name\"] for x in selected_recipes]\n\n # Update last recommended time for recipes\n for recipe_name in selected_recipes_names:\n current_time = int(time())\n\n await execute_query(\n \"UPDATE recipes SET last_recommended = ? WHERE recipe_name = ?\",\n (current_time, recipe_name),\n db_path=db_path,\n )\n logger.debug(\"Updated last recommended times of: {}\".format(selected_recipes_names))\n\n return selected_recipes", "def get_number_of_search_recipes(cuisine):\n cuisine_search_link = SEARCH_URL.format(0, cuisine)\n cuisine_recipes = get_content_from_dynamic_url(cuisine_search_link)\n if not cuisine_recipes:\n print \"no content for:\", cuisine_search_link\n return None\n soup_cuisine = BeautifulSoup(cuisine_recipes)\n # get recipe-count and convert it into integer\n return int(soup_cuisine.find(\"h1\", {\"class\": \"search-title\"}).find(\"em\").get_text())", "def get_recipe_chef(soup_recipe):\n chef_name = soup_recipe.find(\"div\",\n {\"class\": \"recipe-header__chef recipe-header__chef--first\"}).find(\"a\")\n if not chef_name:\n chef_name = soup_recipe.find(\"div\",\n {\"class\": \"recipe-header__chef recipe-header__chef--first\"}).find(\"span\")\n if not chef_name:\n return None\n return chef_name.get_text()", "def get_recipe_chef(soup_recipe):\n chef_name = soup_recipe.find(\"span\", {\"itemprop\": \"author\"})\n if not chef_name:\n return None\n return chef_name.get_text().strip()", "def get_recipe_difficulty(soup_recipe):\n difficulty = soup_recipe.find(\"span\", {\"class\": \"frr_difficulty fr_sep\"})\n if not difficulty:\n return None\n return difficulty.get_text().strip()", "def find_odds(numbers):\n\n pass # remove this line when starting your function", "def get_cuisine_recipes(search_cuisisnes, cuisines):\n cuisine_df = pd.DataFrame()\n for cuisine in search_cuisisnes:\n cuisine_dict = {}\n cuisine_dict['cuisine'] = cuisine\n cuisine_dict['source'] = 'BBC Good Food'\n cuisine_no_space = cuisine.lower().replace(' & ', '-').replace(' ', '-')\n recipes_cuisine_search = get_number_of_search_recipes(cuisine_no_space)\n cuisine_dict['pages'] = int(ceil(recipes_cuisine_search /\n NUMBER_OF_RECIPES_PER_SEARCH_PAGE))\n collection = False\n if cuisine in cuisines:\n 
cuisine_dict['pages'] += 1\n collection = True\n cuisine_dict['recipes_details'] = get_recipe_links(cuisine_no_space,\n cuisine_dict['pages']-1, collection)\n cuisine_dict['num_recipes'] = len(cuisine_dict['recipes_links'])\n print '#####'\n print \"Cuisine: %s \\t Number of recipes: %d \\t\\t Number of pages: %d\" \\\n % (cuisine, cuisine_dict['num_recipes'], cuisine_dict['pages'])\n coll.insert_one(cuisine_dict)\n cuisine_df = cuisine_df.append(pd.DataFrame.from_dict(cuisine_dict, orient='columns'),\n ignore_index=True)\n return cuisine_df", "def get_servings(soup_recipe):\n servings = soup_recipe.find(\"span\", {\"itemprop\": \"recipeYield\"})\n if not servings:\n return None\n return servings.get_text().strip()", "def search_recipes():\r\n cuisine, course, allergens = Helpers.dropdowns(coll_cuisines, coll_courses, coll_allergens)\r\n args = request.args.get\r\n args_list = request.args.getlist\r\n\r\n # Get Search and Pagination arguments from URL\r\n keyword_args = (\r\n args(\"search_keys\") if args(\"search_keys\") is not None else \"\")\r\n cuisineFilter_args = (\r\n args(\"cuisine_filter\") if args(\"cuisine_filter\") is not None else \"\")\r\n courseFilter_args = (\r\n args(\"course_filter\") if args(\"course_filter\") is not None else \"\")\r\n allergenFilter_args = (\r\n args_list(\"allergen_filter\") if args_list(\r\n \"allergen_filter\") is not None else [])\r\n page_args = int(args(\"page\")) if args(\"page\") is not None else 1\r\n\r\n # Set search variables\r\n search_keywords = (\r\n keyword_args.split() if keyword_args is not None else \"\")\r\n search_cuisine = (\r\n cuisineFilter_args if cuisineFilter_args is not None else \"\")\r\n search_course = (\r\n courseFilter_args if courseFilter_args is not None else \"\")\r\n search_allergens = (\r\n allergenFilter_args if allergenFilter_args != [] else \"\")\r\n\r\n # Join search variables and perform search\r\n search = (\r\n '\"' + '\" \"'.join(search_keywords) +\r\n '\" \"' + ''.join(search_cuisine) +\r\n '\" \"' + ''.join(search_course) +\r\n '\"' + ' -' + ' -'.join(search_allergens))\r\n search_results = coll_recipes.find(\r\n {\"$text\": {\"$search\": search}}).skip((page_args * 8) - 8)\\\r\n .limit(8).sort([(\"views\", -1)])\r\n\r\n # Pagination\r\n (\r\n pages, previous_page, next_page, count,\r\n total_recipes, results_count) = Helpers.pagination(\r\n search_results, page_args, coll_recipes)\r\n\r\n return render_template(\r\n \"searchrecipes.html\",\r\n recipes=search_results,\r\n cuisine=sorted(cuisine),\r\n course=course,\r\n allergens=allergens,\r\n keywords=keyword_args,\r\n f_cuisine=cuisineFilter_args,\r\n f_course=courseFilter_args,\r\n f_allergen=allergenFilter_args,\r\n pages=pages,\r\n results_count=results_count,\r\n total_recipes=total_recipes,\r\n count=count,\r\n page=page_args,\r\n next_page=next_page,\r\n previous_page=previous_page)", "def test_get_recipe_information(self):\n pass", "def test_summarize_recipe(self):\n pass", "def get_servings(soup_recipe):\n servings = soup_recipe.find(\"span\", {\"itemprop\": \"recipeYield\"})\n if not servings:\n return None\n return servings.get_text()", "def test_search_by_bad_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['asdfadsfa'])\n self.assertEqual(recipe_id, None)", "def solution(input_string):\n __check_validation(input_string)\n substrings = __get_all_possible_substrings(base_string=input_string)\n best_by_leftovers = __get_candidates_best_by_leftovers_count(substrings=substrings, base_string=input_string)\n best_by_quantity 
= __get_candidates_best_by_elements_count(substrings=best_by_leftovers)\n return best_by_quantity[0][1]", "def test_known_common_stable_isotopes_cases():\n assert \"H-1\" in known_isotopes(\"H\")\n assert \"D\" in known_isotopes(\"H\")\n assert \"T\" in known_isotopes(\"H\")\n assert \"Be-8\" in known_isotopes(\"Be\")\n assert \"Og-294\" in known_isotopes(118)\n assert \"H-1\" in common_isotopes(\"H\")\n assert \"H-4\" not in common_isotopes(1)\n assert \"H-1\" in stable_isotopes(\"H\")\n assert \"D\" in stable_isotopes(\"H\")\n assert \"T\" not in stable_isotopes(\"H\")\n assert \"Fe-56\" in common_isotopes(\"Fe\", most_common_only=True)\n assert \"He-4\" in common_isotopes(\"He\", most_common_only=True)", "def get_recommendations(soup_recipe):\n ratings = soup_recipe.find(\"meta\", {\"itemprop\": \"ratingValue\"})[\"content\"]\n ratings_count = soup_recipe.find(\"meta\", {\"itemprop\": \"ratingCount\"})[\"content\"]\n if ratings == 0:\n return None, None\n return ratings, ratings_count", "def get_recipe_details(recipe_links):\n cuisine_recipes = {}\n for r in recipe_links:\n soup_recipe = BeautifulSoup(r)\n if \"www.chowhound.com\" in r.a[\"href\"]:\n recipe = {}\n recipe['r_link'] = r.a[\"href\"]\n print \"recipe link: \", recipe['r_link']\n soup_recipe = get_recipe(recipe['r_link'])\n recipe['recipe title'] = get_recipe_title(soup_recipe)\n recipe['chef'] = get_recipe_chef(soup_recipe)\n recipe['description'] = get_description(soup_recipe)\n recipe['ingredient list'] = get_recipe_ingredients(soup_recipe)\n recipe['preperation steps'] = get_recipe_preperation(soup_recipe)\n recipe['total_time'], recipe['active_time'] = get_recipe_time(soup_recipe)\n recipe['servings'] = get_servings(soup_recipe)\n recipe['skill_level'] = get_recipe_difficulty(soup_recipe)\n recipe['rating'], recipe['rating count'] = get_ratings(soup_recipe)\n recipe['nutritional_info'] = get_nutrition_per_serving(soup_recipe)\n recipe['image_source'] = get_image_source(soup_recipe)\n cuisine_recipes[recipe['recipe title']] = recipe\n return cuisine_recipes", "def find_recipe(self, recipe_id):\n return self.find_doc('recipe', 'name', self.get_unique_recipe_name(recipe_id))", "def randomly_pick_recipe(cls):\n return random.choice(cls._recipes)", "def search_recipes(\n *,\n keyword: Optional[str] = Query(None, min_length=3, example=\"chicken\"),\n max_results: Optional[int] = 10,\n) -> dict:\n if not keyword:\n # we use Python list slicing to limit results\n # based on the max_results query parameter\n return {\"results\": RECIPES[:max_results]}\n\n results = filter(lambda recipe: keyword.lower() in recipe[\"label\"].lower(), RECIPES)\n return {\"results\": list(results)[:max_results]}", "def get_recipe_by_name(self, name):\n\t\tfor key, val in self.recipes_list.items():\n\t\t\tfor a, b in val.items():\n\t\t\t\tif name == a:\n\t\t\t\t\tprint(str(b))", "def test_find_all_substrings_01():\n assert (U.find_all_substrings(s, 17, 300) ==\n U.find_all_substrings(s, 17, 27))\n s2 = ''.join([random.choice(s) for i in range(100)])\n assert (U.find_all_substrings(s2, 17, 300) ==\n U.find_all_substrings(s2, 17, len(s2) + 1))", "def brute_force_all(foods, done_count, cal_left, pro_left, fat_left, carb_left):\n # using a tolerance of 50 cal/5 carb/fat/pro\n if cal_left < 50 and pro_left < 5 and fat_left < 5 and carb_left < 5:\n return {}\n\n if len(foods) <= done_count: # done going through all the foods\n return {0: 9999999}\n\n # calculate scenario where you don't use the current food\n foods_used_a = brute_force_all(\n foods, 
done_count + 1, cal_left, pro_left, fat_left, carb_left)\n\n # if current food violates reqs, then don't bother calculating for it\n if ((cal_left - foods[done_count]['calories']) < -50\n or (pro_left - foods[done_count]['protein']) < -5\n or (fat_left - foods[done_count]['fat']) < -5\n or (carb_left - foods[done_count]['carbs']) < -5):\n return foods_used_a\n\n # calculate scenario where you use the current food\n foods_used_b = brute_force_all(\n foods, done_count, cal_left - foods[done_count]['calories'],\n pro_left - foods[done_count]['protein'],\n fat_left - foods[done_count]['fat'],\n carb_left - foods[done_count]['carbs'])\n try:\n foods_used_b[done_count] += 1\n except KeyError:\n foods_used_b[done_count] = 1\n\n if len(foods_used_b) == 0:\n return foods_used_a\n\n # calculate cheapest and return\n if cost(foods, foods_used_a) > cost(foods, foods_used_b):\n return foods_used_b\n return foods_used_a", "def solution(n: int = 28123) -> int:\n\n nums = range(1, n+1)\n abundant = list(filter(is_abundant, nums))\n abundant_sums = set(all_sums(abundant, n))\n fit = set(nums) - abundant_sums\n return fit", "def versions_of_recipe(recipe):\n\n versions = []\n for entry in os.listdir(\"../../meta-mender-core/recipes-mender/%s/\" % recipe):\n match = re.match(r\"^%s_([1-9][0-9]*\\.[0-9]+\\.[0-9]+[^.]*)\\.bb\" % recipe, entry)\n if match is not None:\n versions.append(match.group(1))\n return versions", "def brute_force(city_list):\n start = time.time()*1000\n shortest = exhaustive_search(city_list,6)\n stop = time.time()*1000\n print(\"Shortest tour for 6 first cities:\", tour_distance(shortest))\n print (\"Time spent on 6 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,7)\n stop = time.time()*1000\n print(\"Shortest tour for 7 first cities:\", tour_distance(shortest))\n print (\"Time spent on 7 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,8)\n stop = time.time()*1000\n print(\"Shortest tour for 8 first cities:\", tour_distance(shortest))\n print (\"Time spent on 8 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,9)\n stop = time.time()*1000\n print(\"Shortest tour for 9 first cities:\", tour_distance(shortest))\n print (\"Time spent on 9 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\"-\")\n \n start = time.time()*1000\n shortest = exhaustive_search(city_list,10)\n stop = time.time()*1000\n print(\"Shortest tour for 10 first cities:\", tour_distance(shortest))\n print (\"Time spent on 10 first cities:\", \"%.2f\" % (stop-start), \"ms\")\n print(\" \")", "def get_recipes(recipe_list_path):\n autopkgr_path = os.path.expanduser(\n \"~/Library/Application Support/AutoPkgr/recipe_list.txt\")\n recipe_list_path = recipe_list_path if recipe_list_path else autopkgr_path\n if not os.path.exists(recipe_list_path):\n sys.exit(\"recipe_list file %s does not exist!\" % recipe_list_path)\n with open(recipe_list_path) as recipe_list:\n recipes = [recipe.strip() for recipe in recipe_list]\n return recipes", "def run(self):\n matched = ['Program Description']\n matched += sorted(self.matching_recipes())\n\n msg = 'Your Search For:'\n msg += PREFIX + PREFIX.join([\"'\" + word + \"'\" for word in self.words])\n msg += '\\nMatched These Recipes:'\n msg += PREFIX + PREFIX.join(matched)\n print(msg)\n return matched", "def 
brute_force_calories_only(foods, done_count, calories_left):\n if calories_left < 50: # using a tolerance of 50 calories\n return {}\n\n if len(foods) <= done_count: # done going through all the foods\n return {0: 999999}\n\n # calculate for scenario where you ignore the current food and don't use it\n foods_used_a = brute_force_calories_only(\n foods, done_count + 1, calories_left)\n # calculate for scenario where you use the current food\n if (calories_left - foods[done_count]['calories']) < -50:\n return foods_used_a\n foods_used_b = brute_force_calories_only(\n foods, done_count, calories_left - foods[done_count]['calories'])\n try:\n foods_used_b[done_count] += 1\n except KeyError:\n foods_used_b[done_count] = 1\n\n if cost(foods, foods_used_a) > cost(foods, foods_used_b):\n return foods_used_b\n return foods_used_a", "def check_mmum_recipe(recipe):\n\n if recipe[\"Maischform\"] != \"infusion\":\n print(\"[W] Only infusion is supported...\")\n return False\n\n single = [\"Infusion_Hauptguss\", \"Infusion_Einmaischtemperatur\",\"Abmaischtemperatur\",\"Kochzeit_Wuerze\",\"Nachguss\",\"Hefe\",\"Gaertemperatur\"]\n for k in single:\n try:\n _=recipe[k]\n except KeyError:\n print(f\"[E] invalid recipe. This field missed: {k}\")\n return False\n\n \"\"\" This is because this json array is soo stupid -.- \"\"\"\n cnt={\n 'malz':0,\n 'rast':0,\n 'hopfen_vwh':0,\n 'hopfen':0,\n 'extra_ingredient':0,\n 'hopfen_stopf':0,\n 'extra_gaerung':0,\n }\n for k in recipe:\n key = k.split('_') \n if k[:-1] == \"Malz\":\n cnt['malz'] += 1\n elif k[:17] == \"Infusion_Rastzeit\":\n cnt['rast'] += 1 \n elif k[:6] == \"Hopfen\": \n if len(key) == 3:\n if key[2] == \"Sorte\":\n cnt['hopfen'] += 1\n elif len(key) == 4:\n if key[3] == \"Sorte\":\n cnt['hopfen_vwh'] += 1\n elif k[:19] == \"WeitereZutat_Wuerze\": \n if k.split('_')[3] == \"Name\":\n cnt['extra_ingredient'] += 1\n elif key[0] == \"Stopfhopfen\":\n if key[2] == \"Sorte\":\n cnt['hopfen_stopf'] += 1\n elif key[0] == \"WeitereZutat\":\n if key[3] == \"Name\":\n cnt['extra_gaerung'] += 1\n \n if not cnt['hopfen'] or not cnt['malz'] or not cnt['rast']:\n print(f\"[E] invalid recipe, no counter of cnt: {cnt}\")\n return False\n\n return cnt", "def hunt(s):\n\n\t# ISBN-13s\n\tfor regexp in [r'(?:[^0-9]|^)((?:[0-9]-*){12}[0-9X])(?:[^0-9X]|$)',\n\t\t\t\t r'(?:[^0-9]|^)((?:[0-9]-*){9}[0-9X])(?:[^0-9X]|$)']:\n\t\tfor match in re.finditer(regexp, s):\n\t\t\tcandidate = match.group(1)\n\t\t\tif verify(candidate):\n\t\t\t\tyield candidate.replace(\"-\",\"\")", "def part_2(data: Iterator[str]) -> int:\n return solve(data, 5)", "def get_recipes(num_of_pages):\n\n recipe_df = pd.DataFrame()\n recipe_dict = {}\n recipe_dict['cuisine'] = 'Unknown'\n recipe_dict['source'] = 'Chowhound'\n recipe_dict['num_recipes'] = NUMBER_OF_RECIPES_PER_PAGE * num_of_pages\n recipe_dict['pages'] = num_of_pages\n print '#####'\n print \"Cuisine: %s \\t Number of recipes: %r \\t\\t Number of pages: %r\" \\\n % (recipe_dict['cuisine'], recipe_dict['num_recipes'], recipe_dict['pages'])\n recipe_dict['recipe_links'] = get_recipe_links(recipe_dict['pages'])\n recipe_dict['recipes_details'] = get_recipe_links(recipe_dict['pages'])\n coll.insert_one(recipe_dict)\n recipe_df = pd.DataFrame.from_dict(recipe_dict, orient='columns')\n return recipe_df", "def list_recipes(environ, start_response):\n return list_entities(environ, start_response, 'list_recipes')", "def test_find_all_substrings_02():\n assert (U.find_all_substrings(s, 17, 5) ==\n U.find_all_substrings(s, 17, 18))\n s2 = 
''.join([random.choice(s) for i in range(100)])\n assert (U.find_all_substrings(s2, 17, 5) ==\n U.find_all_substrings(s2, 17, 18))", "def brute_force_search_solution():\n return len(coin_search(TOTAL, COINS))", "def RecipeToText(recipe):\n\n\tout = []\n\tworld = None\n\tfor (annotation, next_world) in recipe[1:]:\n\t\tcommand = annotation[0]\n\t\targuments = annotation[1]\n\n\t\trecipe_text = ''\n\t\tif command == 'create_ing':\n\t\t\t# TODO: When computing BLEU score, we may wish to ignore create_ing\n\t\t\t# commands since they are trivially translated\n\t\t\trecipe_text += '%s.' % arguments[1]\n\n\t\telif command == 'create_tool':\n\t\t\t# TODO: This is a horrible hack but we need some way to make sure that the\n\t\t\t# length of the outputted string is equal to that of the list of original\n\t\t\t# texts.\n\t\t\trecipe_text = '<create_tool>'\n\n\t\telif command == 'combine':\n\t\t\trecipe_text += 'Combine '\n\n\t\t\trecipe_text += ', '.join([world.I_d[ing] for ing in arguments[0]])\n\n\t\t\tif not IsNull(arguments[3]):\n\t\t\t\trecipe_text += ', %s' % arguments[3]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'separate':\n\t\t\trecipe_text += 'Separate '\n\t\t\trecipe_text += '%s and %s' % (world.I_d[arguments[0]], next_world.I_d[arguments[1]])\n\n\t\t\tif not IsNull(arguments[5]):\n\t\t\t\trecipe_text += ', %s' % arguments[5]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'put':\n\t\t\trecipe_text += 'Put %s in %s. ' % (world.I_d[arguments[0]], world.T_d[arguments[1]])\n\n\t\telif command == 'remove':\n\t\t\trecipe_text += 'Remove %s from %s. ' % (world.I_d[arguments[0]], world.T_d[arguments[1]])\n\n\t\telif command == 'cut':\n\t\t\trecipe_text += 'Chop %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'mix':\n\t\t\trecipe_text += 'Mix %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'cook':\n\t\t\trecipe_text += 'Cook %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'do':\n\t\t\trecipe_text += 'Taking %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ' with %s' % world.T_d[arguments[1]]\n\n\t\t\tif not IsNull(arguments[4]):\n\t\t\t\trecipe_text += ', %s' % arguments[4]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'serve':\n\t\t\trecipe_text += 'Serve %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ', %s' % arguments[1]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'set':\n\t\t\trecipe_text += 'Set %s on %s. ' % (world.T_d[arguments[0]], arguments[1])\n\n\t\telif command == 'leave':\n\t\t\trecipe_text += 'Leave %s' % world.I_d[arguments[0]]\n\n\t\t\tif not IsNull(arguments[1]):\n\t\t\t\trecipe_text += ', %s' % arguments[1]\n\n\t\t\trecipe_text += '.'\n\n\t\telif command == 'chefcheck':\n\t\t\trecipe_text += 'Check %s for %s. 
' % (world.I_d[arguments[0]], arguments[1])\n\n\t\tworld = next_world\n\t\tout.append(recipe_text)\n\n\treturn out", "def get_random_recipe(self):\n\n url = \"{}/recipes/random?number=1\".format(self.base_url)\n return self._get_recipe_ids(url, random_recipe=True)[0]", "def test_get_recipe_by_id(self):\n recipe = self.request_mgr.get_recipe_by_id(35354)\n self.assertIn(\"Guinness\", recipe.get('title'))", "def schrodingers_cat(peek=False):\n from random import choice, randint\n if peek:\n if randint(1, 10) % 2 == 0:\n # RIP\n return \"Nothing at all\"\n else:\n return poke_the_cat(Cat.LEGS, catnip=True)\n else:\n garbled_cries = \"mew meow wokka beocat ekkie\".split()\n return choice(garbled_cries)", "def get_recipe(recipe_link):\n recipe_response = get_content_from_url(recipe_link)\n if not recipe_response:\n print \"no content for:\", recipe_link\n return None\n return BeautifulSoup(recipe_response)", "def problem_52():\n\n for number in xrange(1, 123456789):\n sorted_num = ''.join(sorted(str(number)))\n if len([value for value in xrange(2, 7)\n if ''.join(sorted(str((value * number)))) == sorted_num]) == 5:\n return number", "def brute_force_hashed(seats):\n seats = set(seats)\n for seat in seats:\n if seat + 1 not in seats and seat + 2 in seats:\n return seat + 1\n\n return None", "def get_category_recipe(filters, db_conn, host_url):\n try:\n result = db_conn[\"recipes\"].find(\n filters).sort(\"createdOn\", -1).limit(9)\n recipe_list = map_response(result, host_url)\n\n return recipe_list\n\n except Exception as e:\n print(e)\n return {\"success\": False, \"message\": \"Error in api: \" + str(e)}", "def bottles_required(beer_recipe):\r\n bottles_demanded = upcoming_future_prediction(beer_recipe)\r\n bottles_ready = delivery_information[beer_recipe]\r\n if bottles_ready > bottles_demanded:\r\n bottles_needed = 0\r\n else:\r\n bottles_needed = bottles_demanded - bottles_ready\r\n return int(bottles_demanded), bottles_ready, int(bottles_needed)", "def get_candidates(beer):\n span = tracer.current_span()\n span.set_tags({'beer.name': beer.name, 'beer.hops': beer.hops})\n\n db = DonutStats.instance()\n\n # find our optimal sugar level Donuts above or below this level\n # will certainly not be a good match\n optimal_sugar_level = db.get_optimal_sugar_level(beer.hops)\n return db.get_by_sugar_level(optimal_sugar_level, limit=10)", "def shorten_ingredient(ingredient):\n terms = set([\"chicken\", \"flour\", \"milk\", \"rice\", \"pork\",\n \"beef\", \"lettuce\", \"beans\", \"shrimp\", \"salt\",\n \"cream\"])\n # get just the overlapping terms in the set\n intersection = set(ingredient.split(\" \")).intersection(terms)\n if intersection:\n return str(list(intersection)[0]) # casts the set to a list for indexing\n else:\n return ingredient", "def test_hackerrank_sample2(self):\n result = find_digits(1012)\n self.assertEquals(result, 3)", "async def get_last_recommended_recipes(\n time_period: int = 3600, db_path: Path = DB_PATH\n) -> dict:\n recipes = await get_query_results(\n \"SELECT recipe_name, last_recommended FROM recipes\", db_path=db_path\n )\n\n current_time = int(time())\n cutoff_point = current_time - time_period\n\n recommended_recipes = []\n for recipe_name, last_recommended in recipes:\n if last_recommended > cutoff_point:\n recommended_recipes.append(recipe_name)\n\n recommended_recipes.sort()\n\n return {\"last_recommended_recipes\": recommended_recipes}", "def list_recipes(self, recipes):\n prefix, suffix = self._get_jsonp()\n return prefix + JSON.list_recipes(self, recipes) + 
suffix", "def test_find_all_substrings_03():\n assert (U.find_all_substrings(s, 17, 17) ==\n U.find_all_substrings(s, 17, 18))\n s2 = ''.join([random.choice(s) for i in range(100)])\n assert (U.find_all_substrings(s2, 17, 17) ==\n U.find_all_substrings(s2, 17, 18))", "def ore_required(recipes, fuel_required=1):\n\n stock_required = {'FUEL': fuel_required}\n is_finished = False\n\n while not is_finished:\n sku, sku_quantity_required = next((sku, sku_quantity_required)\n for sku, sku_quantity_required in stock_required.items() if sku_quantity_required > 0 and sku != 'ORE')\n min_batch_size, bom = recipes[sku]\n batches_required = math.ceil(sku_quantity_required/min_batch_size)\n for quantity, ingredient in bom:\n stock_required[ingredient] = stock_required.get(\n ingredient, 0) + batches_required * quantity\n stock_required[sku] -= batches_required * min_batch_size\n is_finished = len([(sku, sku_quantity_required) for sku, sku_quantity_required in stock_required.items()\n if sku_quantity_required > 0 and sku != 'ORE']) == 0\n\n return stock_required['ORE']", "def task2(dictionary):\n word_count = Counter(dictionary)\n ans = word_count.most_common(10)\n print(ans)\n return ans", "def brute_force_cow_transport(cows,limit=10):\r\n \r\n def addCowWeight(list, cows):\r\n \"\"\"adds a list of cows, with value coming from dict\"\"\"\r\n sum = 0.0\r\n for key in list:\r\n sum += cows[key]\r\n return sum\r\n \r\n #list of cows\r\n cowName = (cows.keys())\r\n ##cowName = ['Maggie', 'Lola', 'Oreo']\r\n \r\n #list to store all partitions and useful ones\r\n allPart = []\r\n usePart = []\r\n \r\n for part in get_partitions(cowName):\r\n allPart.append(part)\r\n\r\n #make a test that checks each trip list if their sum <= limit\r\n #adds each partition that passes all tests to usePart\r\n for part in allPart:\r\n test = []\r\n for trip in part:\r\n if addCowWeight(trip, cows) <= limit:\r\n test.append(trip)\r\n\r\n if len(test) == len(part):\r\n usePart.append(part)\r\n \r\n #find all the lengths of each option, and search for smallest\r\n lenIndex = []\r\n for part in usePart:\r\n lenIndex.append(len(part))\r\n\r\n find = min(lenIndex)\r\n\r\n for part in usePart:\r\n if len(part) == find:\r\n return part", "def fizzbuzz(n,additional_rules=None):\n rules = {3:\"Fizz\",5:\"Buzz\"}\n ans = \"\"\n if additional_rules:\n rules.update(additional_rules)\n for divisor in sorted(rules.keys()):\n if n%divisor == 0:\n ans += rules[divisor]\n if not ans:\n ans = str(n)\n return ans", "def brute_force(seats):\n for seat in seats:\n if seat + 1 not in seats and seat + 2 in seats:\n return seat + 1\n\n return None", "def dawkins_algorithm(strlen=28):\n copies = copy_string(generate_string(strlen))\n new_score = 0\n\n while new_score < 28:\n for index, s in enumerate(copies):\n current_score = compare(s)\n mutated_string = mutate_string(s)\n new_score = compare(mutated_string)\n if new_score > current_score:\n copies[index] = mutated_string\n print(mutated_string)\n\n if new_score == strlen:\n break\n\n copies = copy_string(find_best_candidate(copies))\n\n return print(\"Done\")", "def test_get_top_n_words_more_number(self):\n expected = ['man', 'happy']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 10)\n self.assertEqual(expected, actual)", "def solution(s):", "def process_recipe(self, recipe: int):\n\n recipe = CoffeeMachine.recipes.get_recipe(recipe)\n\n if (self.water - recipe.water) < 0:\n print('Sorry, not enough water!')\n return\n\n if (self.milk - recipe.milk) < 0:\n print('Sorry, not enough milk!')\n 
return\n\n if (self.beans - recipe.beans) < 0:\n print('Sorry, not enough coffee beans!')\n return\n\n if self.cups == 0:\n print('Sorry, not enough coffee disposable cups!')\n return\n\n print('I have enough resources, making you a coffee!')\n\n # Charging\n self.water -= recipe.water\n self.milk -= recipe.milk\n self.beans -= recipe.beans\n self.money += recipe.cost\n self.cups -= 1", "def digest(sequence, enzyme, count_missed_cleavages=None, no_missed_cleavages=False):\n tmp = \"\"\n result = []\n additionals = list()\n # for backwards compatibility e.g. sole use of kwarg \"no_missed_cleavages\"\n # and no use of no_missed_cleavages\n if count_missed_cleavages is None: # i.e. not set\n if no_missed_cleavages is False:\n count_missed_cleavages = 2\n else:\n count_missed_cleavages = 0\n\n cleavage_aa, site = enzyme\n for p, aa in enumerate(sequence):\n if aa == \"*\":\n continue\n tmp += aa\n if aa in cleavage_aa:\n if site == \"C\":\n result.append(tmp)\n tmp = \"\"\n elif site == \"N\":\n\n result.append(tmp[0 : len(tmp) - 1])\n tmp = \"\"\n tmp += aa\n if tmp != \"\":\n result.append(tmp)\n if count_missed_cleavages > len(result):\n count_missed_cleavages = len(result)\n\n if count_missed_cleavages == 0:\n additionals = result\n else:\n for r in range(len(result)):\n # r is the index of each fully-cleaved peptide in the list from above\n for mc in range(r, len(result) + 1):\n # now starting with 'r' we interrogate all other pepitdes and build further peptides\n # up to the desired number of missed cleavages\n if mc - r >= count_missed_cleavages:\n continue\n if mc + 2 > len(result):\n # i.e. if are over end of list\n continue\n # need to add 2 to mc a it's a location marker.\n # mc is essentially the first peptide in the list\n newpep = \"\".join(result[r : mc + 2])\n if newpep != \"\":\n additionals.append(newpep)\n additionals += result\n return additionals", "def _recipe_details_generator(self, converted_content, overview_recipe):\n def get_cooking_shop_strings(lines):\n ret = []\n buf = None\n is_recipe_step_area = False\n for l in lines:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n if buf:\n ret.append(buf)\n buf = l.strip()\n continue\n\n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_recipe_step_area = False\n\n if re.search(\"^材料\", l.strip()):\n title, materials = re.search(\"(材料)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + materials.strip()\n continue\n\n if re.search(\"^作り方\", l.strip()):\n is_recipe_step_area = True\n title, recipe_steps = re.search(\"(作り方)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + recipe_steps.strip()\n continue\n \n if buf:\n if is_recipe_step_area:\n if re.match(r\"^[①-⑳*]\", l.strip()):\n buf += \"\\n\" + l.strip()\n else:\n buf += l.strip()\n else:\n buf += \"\\n\" + l.strip()\n if buf:\n ret.append(buf)\n\n return ret\n \n \n for ii, l in enumerate(converted_content.splitlines()):\n if ii == 1:\n overview_recipe.cooking_name_sub = l.strip()\n continue\n \n if -1 < l.find(\"初回放送\"):\n overview_recipe.program_date = dateutil.parser.parse(\"/\".join(re.search(r\"(\\d+)\\D+(\\d+)\\D+(\\d+)\\D+\", l).groups()))\n break\n\n cooking_shop_strings = get_cooking_shop_strings(converted_content.splitlines())\n\n logger.debug(\"-\" * 20)\n logger.debug(cooking_shop_strings)\n for shop_string in cooking_shop_strings:\n 
recipe_shop = None\n recipe = None\n is_material_area = False\n is_recipe_step_area = False\n for l in shop_string.splitlines():\n if len(l.strip()) == 0:\n continue\n \n if is_material_area == False and is_recipe_step_area == False:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n recipe_shop = copy.deepcopy(overview_recipe)\n recipe = None\n \n m = re.search(r\"「(.*)」\", l)\n if m:\n recipe_shop.cooking_name_sub += \"/\" + m.group(1)\n else:\n m2 = re.search(r\"『(.*)』\", l)\n if m2:\n recipe_shop.cooking_name_sub += \"/\" + m2.group(1)\n \n continue\n \n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_material_area = False\n is_recipe_step_area = False\n if recipe:\n yield recipe\n\n if recipe_shop:\n recipe = copy.deepcopy(recipe_shop)\n else:\n recipe = copy.deepcopy(overview_recipe)\n \n if -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif re.search(r\"^(料理|万能調味料)[①-⑳]\", l.strip()):\n # https://www.nhk.or.jp/program/manpuku/recipe/dg0_200115.pdf\n # 料理①カルパッチョ\n recipe.cooking_name = l.strip()[3:].strip()\n else:\n recipe.cooking_name = l.split(None, 1)[1].strip()\n continue\n \n if re.search(\"^材料\", l.strip()):\n is_material_area = True\n is_recipe_step_area = False\n if l.strip() == \"材料\":\n continue\n \n if re.search(\"^作り方\", l.strip()):\n is_material_area = False\n is_recipe_step_area = True\n if l.strip() == \"作り方\":\n pass\n else:\n l = l.replace(\"作り方\", \"\", 1)\n # recipeがNoneの場合はエラーとして検出したい\n recipe.recipe_steps.append(RecipeText(l.strip()))\n continue\n \n \n if is_material_area:\n for material in l.strip().split(\"、\"):\n material = material.strip()\n if len(material):\n if material.startswith(\"(\"):\n recipe.materials.append(RecipeText(material))\n else:\n recipe.materials.append(RecipeText(material.replace(\"(\", \": \").replace(\")\", \"\")))\n \n if is_recipe_step_area:\n recipe.recipe_steps.append(RecipeText(l.strip()))\n if recipe:\n yield recipe", "def fast():\n #get the first 4-6 digit primes\n for num in xrange(1000,1000000+1):\n if not is_prime[num]:\n continue\n num = str(num)\n do_check = False\n for r in '012':\n if num.count(r):\n do_check = True\n break\n if not do_check:\n continue\n prime_candidates = []\n fail_count = 0\n for m in '0123456789':\n check_n = num.replace(r, m)\n if check_n[0] != '0' and is_prime[int(check_n)]:\n prime_candidates.append(check_n)\n else:\n fail_count += 1\n if fail_count > 2:\n break\n if fail_count <= 2:\n print \"Answer:\", prime_candidates[0]\n break", "def solution(self):\n return [(\"the\", 1579644)] * 100", "def _check_recipes(self, recipes):\n\n ret = {}\n if type(recipes) is not dict:\n print(\"Error: recipes is not type 'dict'!\")\n return ret\n\n for (recipe, flavors) in recipes.items():\n if type(flavors) is not dict:\n print(\"Error: recipe %s does not contain a dict of flavors\"%recipe)\n continue\n ret[recipe] = {}\n for (flav, amount) in flavors.items():\n if type(amount) is not int and type(amount) is not float:\n print(\"Error: flavor %s has non-numeric amount: %s\"%(flav, amount))\n continue\n # always assume percent\n amount = amount / 100.0\n ret[recipe][flav] = amount\n\n return ret", "def test_get_top_n_words_incorrect_numbers(self):\n expected = []\n actual = get_top_n_words({}, -1)\n self.assertEqual(expected, actual)\n actual = get_top_n_words({'happy': 2}, 0)\n self.assertEqual(expected, actual)", "def 
randomInt(catsString):\n wordList = specificWordList(catsString)\n maximum = len(wordList) - 1\n half = round((xAppeared(\">\") - xAppeared(\"<\")) / 2)\n while True:\n randID = random.randint(0,maximum)\n word = wordList[randID]\n if randomBoolean():\n return randID\n elif word.times_appeared < half:\n return randID", "def find():\n b = 0\n q = 0\n while b == q:\n seq = [randint(-10, 10) for _ in range(randint(15, 30))]\n b, b_at = brute_force(seq)\n q = solution(seq)\n print(seq, b, q, b_at)", "def search_recipes(request):\n\n string_to_find = request.GET.get(\"term\", None)\n\n if string_to_find is None:\n return HttpResponse(status=400)\n\n matching_recipes = Recipe.objects.filter(title__icontains=string_to_find)\n\n context = {}\n for r in matching_recipes:\n context[r.title] = reverse('recipes:recipe', kwargs={'recipe_slug': r.slug})\n\n return HttpResponse(json.dumps(context), content_type='application/json')", "def recipe(id):\n\n selected_recipe = mongo.db.recipes.find_one({'_id': ObjectId(id)})\n\n # Using create list function to display these sections easier\n display_method = create_list(selected_recipe[\"method\"])\n display_ingredients = create_list(selected_recipe[\"ingredients\"])\n display_equipment = create_list(selected_recipe[\"equipment\"])\n\n show_ad = make_comparison(ad_equipment, display_equipment)\n\n return render_template('view_recipe.html', recipe=selected_recipe,\n title='Recipe', display_method=display_method,\n ad_equipment=ad_equipment,\n display_ingredients=display_ingredients,\n display_equipment=display_equipment,\n show_ad=show_ad)", "def best_match(beer):\n # get a list of donuts that match sugar content for beer\n candidates = get_candidates(beer)\n span = tracer.current_span()\n span.set_tag('donuts.candidates', candidates)\n\n # send the remaining candidates to our taster and pick the best\n max_score = -1\n best_match = None\n\n for candidate in candidates:\n try:\n resp = requests.get(\n \"http://taster:5001/taste\",\n params={\"beer\": beer.name, \"donut\": candidate},\n timeout=2,\n )\n except requests.exceptions.Timeout:\n continue\n\n score = resp.json()[\"score\"]\n if score > max_score:\n max_score = score\n best_match = candidate\n\n return best_match", "def test_hackerrank_sample1(self):\n result = find_digits(12)\n self.assertEquals(result, 2)", "def heads_legs(heads, legs):\n for i in range(0, heads + 1):\n cows = heads - i\n if 4 * cows + 2 * i == legs:\n chickens = i\n return chickens, cows\n return \"No solutions.\"", "def get_recipe(recipe_id,\n ingredients=True,\n instructions=True,\n session=None):\n if session is None:\n session = requests.Session()\n payload = {'recipeid': recipe_id,\n 'getIngredients': 'true' if ingredients else 'false',\n 'getInstructions': 'true' if instructions else 'false'}\n response = session.get('https://cms.sortedfood.com/apiRecipe/getRecipe',\n params=payload)\n if response.status_code == 200:\n response.encoding = 'utf-8'\n return json.loads(response.text)\n else:\n raise requests.HTTPError(response)", "def show_fav_recipes():\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n \n data = search_recipes(request) \n favorite_list = [l.id for l in g.user.recipes]\n favorites = [f['id'] for f in data['results'] if f['id'] in favorite_list]\n \n\n return render_template(\"favs/show.html\", favorites=favorites)", "def test_get_recipe_taste_by_id(self):\n pass", "def cardinal(n, friendly=True):\n if friendly:\n n_abs = abs(n)\n\n if n_abs < 20:\n return cardinal(n, 
friendly=False)\n\n if n_abs < 100 and n_abs % 10 == 0:\n return cardinal(n, friendly=False)\n\n if n_abs < 1000 and n_abs % 100 == 0:\n return cardinal(n, friendly=False)\n\n if n_abs < 12000 and n_abs % 1000 == 0:\n return cardinal(n, friendly=False)\n\n prefix = \"min \" if n < 0 else \"\"\n\n if n_abs < MILLION:\n q, r = divmod(n_abs, 1000)\n if r == 0:\n return prefix + \"%d duizend\" % q\n\n if n_abs < BILLION:\n q, r = divmod(n_abs, MILLION)\n if r == 0:\n return prefix + \"%d miljoen\" % q\n\n # No friendly variant, just return the numerical representation.\n return unicode(n)\n\n # Code below completely spells out each number.\n\n if n < 0:\n return \"min \" + cardinal(abs(n))\n\n if n < 20:\n return UNITS[n]\n\n if n < 100:\n q, r = divmod(n, 10)\n a = TENS[q]\n if r == 0:\n return a\n b = cardinal(r)\n joiner = \"en\" if not b.endswith(\"e\") else \"ën\"\n return b + joiner + a\n\n if n < 1000:\n q, r = divmod(n, 100)\n a = cardinal(q, friendly=False) if q > 1 else \"\"\n b = cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \"honderd\" + b\n\n if 1000 < n < 10000 and n % 1000:\n # Special case for numbers that are exactly divisble by 100, but\n # not by 1000, e.g. \"tweeëntwintighonderd\"\n q, r = divmod(n, 100)\n if r == 0:\n a = cardinal(q, friendly=False) if q > 1 else \"\"\n return a + \"honderd\"\n\n if n < MILLION:\n q, r = divmod(n, 1000)\n a = cardinal(q, friendly=False) if q > 1 else \"\"\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \"duizend\" + b\n\n if n < BILLION:\n q, r = divmod(n, MILLION)\n a = cardinal(q, friendly=False)\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \" miljoen\" + b\n\n if n < TRILLION:\n q, r = divmod(n, BILLION)\n a = cardinal(q, friendly=False)\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \" miljard\" + b\n\n if n < QUADRILLION:\n q, r = divmod(n, TRILLION)\n a = cardinal(q, friendly=False)\n b = \" \" + cardinal(r, friendly=False) if r > 0 else \"\"\n return a + \" biljoen\" + b\n\n # Fallback to numerical representation\n return unicode(n)", "def solve_part1(start):\n all_ilists = load_inputs()\n\n allergen_map = get_allergen_map(all_ilists)\n all_ingredients = get_all_ingredients(all_ilists)\n\n all_potential_bad_ingredients = set()\n\n for l in allergen_map.values():\n all_potential_bad_ingredients.update(l)\n\n safe_ingredients = [a for a in all_ingredients if a not in all_potential_bad_ingredients]\n\n safe_ingred_count = 0\n for ilist in all_ilists:\n this_ingredients = ilist.get_ingredients()\n this_safe_ingredients = [a for a in this_ingredients if a in safe_ingredients]\n safe_ingred_count += len(this_safe_ingredients)\n\n return safe_ingred_count", "def fizzbuzz(n: int) -> None:\n for i in range(1, n + 1):\n s = ''\n if i % 3 == 0:\n s += 'fizz'\n if i % 5 == 0:\n s += 'fuzz'\n if s == '':\n s += str(i)\n print(s)", "def get_recipe(r_link):\n recipe_link = RECIPE_URL.format(r_link)\n recipe_response = get_content_from_static_url(recipe_link)\n if not recipe_response:\n print \"no content for: \", recipe_link\n return None\n return BeautifulSoup(recipe_response)", "def checksufficientresources(ingredients):\n for item in ingredients:\n if ingredients[item] >= resources[item]:\n print(f\"Sorry there is not enough {item}\")\n return False\n return True", "def test_get_food_most_calories_smaller_population():\n df_breakfast = df[df['Category'] == 'Breakfast']\n\n actual = get_food_most_calories(df_breakfast)\n expected = 'Big Breakfast with 
Hotcakes (Large Biscuit)'\n assert actual == expected", "def test_basic(self):\n self.assertEqual(solution(\"\"\"11111\n19991\n19191\n19991\n11111\"\"\"), 6)\n self.assertEqual(solution(\"\"\"5483143223\n2745854711\n5264556173\n6141336146\n6357385478\n4167524645\n2176841721\n6882881134\n4846848554\n5283751526\"\"\"), 195)", "def test_known_common_stable_isotopes():\n\n known_should_be = [\"H-1\", \"D\", \"T\", \"H-4\", \"H-5\", \"H-6\", \"H-7\"]\n common_should_be = [\"H-1\", \"D\"]\n stable_should_be = [\"He-3\", \"He-4\"]\n\n assert known_isotopes(\"H\") == known_should_be, (\n f\"known_isotopes('H') should return {known_should_be}, but is \"\n f\"instead returning {known_isotopes('H')}\"\n )\n\n assert common_isotopes(\"H\") == common_should_be, (\n f\"common_isotopes('H') should return {common_should_be}, but is \"\n f\"instead returning {common_isotopes('H')}\"\n )\n\n assert stable_isotopes(\"He\") == stable_should_be, (\n f\"stable_isotopes('He') should return {stable_should_be}, but is \"\n f\"instead returning {stable_isotopes('He')}\"\n )", "def recipes():\n recipes = mongo.db.recipes.find()\n return render_template(\"recipes.html\", recipes=recipes)", "def test_make_most_abundant(self):\r\n ids = \\\r\n \"R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969\".split(\r\n )\r\n seqs = dict(parse_fasta(dna_seqs.splitlines(),\r\n label_to_name=label_to_name))\r\n f = make_most_abundant(seqs)\r\n result = f(ids, seqs)\r\n assert result in ['R27DLI_4812', 'R27DLI_727', 'U1PLI_8969']", "def main() -> None:\n numbers = []\n with open('./inputs/day01.txt') as infile:\n for line in infile.readlines():\n numbers.append(int(line))\n\n match = find_match(numbers, 2020)\n print(match, reduce(lambda x, y: x * y, match))\n match = find_match(numbers, 2020, 3)\n print(match, reduce(lambda x, y: x * y, match))", "def recipes():\n # pylint: disable=redefined-outer-name\n recipes = list(mongo.db.recipes.find().sort('_id', -1))\n return render_template(\n \"recipes.html\", page_title=\"All Recipes\", recipes=recipes)", "def solution():\n i = 1\n\n while True:\n if (\n sorted(str(i))\n == sorted(str(2 * i))\n == sorted(str(3 * i))\n == sorted(str(4 * i))\n == sorted(str(5 * i))\n == sorted(str(6 * i))\n ):\n return i\n\n i += 1" ]
[ "0.62732804", "0.61937374", "0.6106467", "0.59431934", "0.59081733", "0.5582103", "0.5502457", "0.5500289", "0.5376491", "0.53712493", "0.53555405", "0.53383607", "0.52812743", "0.52651393", "0.5197361", "0.5169107", "0.516706", "0.51159555", "0.5107954", "0.51040334", "0.50993323", "0.50930053", "0.50803745", "0.50738853", "0.50727516", "0.5061389", "0.50247467", "0.5018143", "0.5017217", "0.50040656", "0.49855003", "0.4984301", "0.49753863", "0.4967604", "0.49600962", "0.49380058", "0.4929365", "0.49194592", "0.49112794", "0.49082518", "0.49047396", "0.48990065", "0.48951927", "0.48903406", "0.48873374", "0.48868594", "0.48841947", "0.48737317", "0.487251", "0.48530427", "0.48322368", "0.48221937", "0.48116744", "0.481031", "0.4807339", "0.48042208", "0.48019084", "0.4799535", "0.47953826", "0.47948495", "0.4791926", "0.47899097", "0.4787333", "0.47849962", "0.47818446", "0.47801414", "0.47766373", "0.4775337", "0.4773967", "0.47599408", "0.4757961", "0.475448", "0.47507116", "0.47390017", "0.47384244", "0.47336257", "0.47225812", "0.47191605", "0.47154692", "0.47034404", "0.47029206", "0.4700909", "0.4699275", "0.46973386", "0.46897677", "0.46890715", "0.4685044", "0.4683386", "0.4681277", "0.4678942", "0.46778893", "0.46740064", "0.46721137", "0.46666136", "0.46643996", "0.46558765", "0.46428287", "0.46382493", "0.46339485", "0.46294957" ]
0.7260232
0
>>> measure_the_work('51589') 9 >>> measure_the_work('01245') 5 >>> measure_the_work('92510') 18 >>> measure_the_work('59414') 2018
def measure_the_work(pattern_to_find): recipes = [3, 7] keys = [int(key) for key in pattern_to_find] elf1 = 0 elf2 = 1 not_found = True while not_found: elf1_value = recipes[elf1] elf2_value = recipes[elf2] recipe_sum = elf1_value + elf2_value if recipe_sum > 9: recipe_string = f"{recipe_sum:02d}" recipes.append(int(recipe_string[:1])) recipes.append(int(recipe_string[1:])) else: recipes.append(recipe_sum) elf1 = loop_around(1 + elf1 + elf1_value, len(recipes)) elf2 = loop_around(1 + elf2 + elf2_value, len(recipes)) if recipes[-1] == keys[-1] or recipes[-2] == keys[-1]: if pattern_to_find in ''.join(map(str, recipes[-(len(keys) + 2):])): not_found = False if recipes[-1] == keys[-1]: return len(recipes) - len(keys) else: return len(recipes) - len(keys) - 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def micros() -> int:", "def getMeasure(unique_name):", "def getMeasure(unique_name):", "def compute(data):\n spoken = collections.defaultdict(lambda: collections.deque(maxlen=2))\n\n starting = map(int, data.split(\",\"))\n last_number = None\n i = 0\n\n for i, last_number in enumerate(starting, start=i + 1):\n spoken[last_number].append(i)\n\n for i in range(i + 1, 30000001):\n if len(spoken[last_number]) <= 1:\n last_number = 0\n else:\n last_number = spoken[last_number][1] - spoken[last_number][0]\n\n spoken[last_number].append(i)\n\n return last_number", "def approximateTime(meal):\n RATE = 4.2535969274764765e-05 # seconds per character.\n time = len(meal)**1 * RATE\n return time", "def get_analysis(num_hours):\n\n # These are all different strings that will get returned based on how well\n # the user performed last week (in terms of hours played)\n did_well = '\\n😁 Great Job! Looks like you have been really' + \\\n ' productive last week.'\n did_alright = '\\n🙂 Looks like you have been pretty productive last ' + \\\n 'week. You might want to limit your hours for next week to' + \\\n ' increase productivity even more!'\n did_poorly = '\\n😦 Seems like you play a lot! Definitely try limiting ' + \\\n 'your hours to be more productive.'\n did_horrible = '\\n😡 You should really limit your hours, you probably' + \\\n ' cannot get much work done if you play this much.'\n\n # Depending on how many hours are played, a different analysis string \n # is returned\n if num_hours <= 1:\n return did_well\n elif num_hours > 1 and num_hours <= 5:\n return did_alright\n elif num_hours > 5 and num_hours <= 10:\n return did_poorly\n else:\n return did_horrible", "def test_count_880_000(self):\n value: int = 880_000\n result: int = 472_910\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def run(data):\n max_calories = 0\n calories = 0\n for item in data:\n item = item.strip()\n if item != \"\":\n calories += int(item)\n else:\n max_calories = set_max(calories, max_calories)\n calories = 0\n max_calories = set_max(calories, max_calories)\n\n return max_calories", "def test_count_773_000(self):\n value: int = 773_000\n result: int = 411_864\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def runtime_cal(start,end) :\n run_time = end - start\n mm = int(run_time/60)\n ss = round(run_time%60)\n return mm, ss", "def solution(self):\n return [(\"the\", 1561900)] * 100", "def lab_run_med(character_id, time_step):\n pass", "def solution(self):\n return [(\"the\", 1579644)] * 100", "def run_p2(data):\n calorie_list = []\n calories = 0\n for item in data:\n item = item.strip()\n if item != \"\":\n calories += int(item)\n else:\n calorie_list.append(calories)\n calories = 0\n calorie_list.append(calories)\n\n return sum(sorted(calorie_list, reverse=True)[:3])", "def test_count_883_904(self):\n value: int = 883_904\n result: int = 475_749\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def test_count_770_000(self):\n value: int = 770_000\n result: int = 410_562\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def run_timings():\n\n running_times = []\n\n while recorded_time := input(f\"Enter your 10k time: \"):\n if not recorded_time:\n break\n running_times.append(float(recorded_time))\n average_pace = sum(running_times) / len(running_times)\n return average_pace", "def question_18(list_str: 
str) -> int:\n return int(sum(len(i) for i in list_str) / len(list_str))", "def extract_score(a): \r\n if \"reps\" in a:\r\n return int(a.split(\" \")[0])\r\n elif \"kg\" in a:\r\n return int(a.replace(\" kg\",\"\"))\r\n elif \"lb\" in a:\r\n lb = int(a.replace(\" lb\",\"\"))\r\n kg = round(lb * 0.45359237, 0)\r\n return kg\r\n else:\r\n return pd.to_timedelta('0:'+a)", "def find_duration(data):\n t = [i[0] for i in data]\n duration = t[len(t) - 1] - t[0]\n logging.info('Calculated duration: %s', duration)\n return duration", "def test_count_800_000(self):\n value: int = 800_000\n result: int = 426_983\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def test_count_700_000(self):\n value: int = 700_000\n result: int = 374_421\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def estimate_runtime():\n cpu_count = multiprocessing.cpu_count()\n years = request.args.get('years', type=int)\n if not years:\n raise InvalidUsage(\n \"The 'years' param is required and must be an integer >= 1\")\n time_per_year = 4 # seconds\n diluted_cpu_effect = 1 + (1.0 * cpu_count / 10)\n raw_guess = time_per_year * math.ceil(1.0 * years / diluted_cpu_effect)\n result = int(max(raw_guess, 15))\n return jsonify({'seconds': result})", "def test_count_773_904(self):\n value: int = 773_904\n result: int = 412_264\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def sixteen():\r\n \r\n number = str(pow(2, 1000))\r\n sum = 0\r\n \r\n for i in number:\r\n sum += int(i)\r\n \r\n return sum", "def get_workex_months(current_bucket_workex):\r\n\r\n computed_workex = re.findall(r\"\\d+\", current_bucket_workex)\r\n if len(computed_workex) > 0:\r\n return computed_workex[0]\r\n return 0", "def timeInWords(hour, minutes):\n num_to_word = {\n 1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\", 5: \"five\",\n 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"ten\",\n 11: \"eleven\", 12: \"twelve\", 13: \"thirteen\", 14: \"fourteen\",\n 15: \"quarter\", 16: \"sixteen\", 17: \"seventeen\",\n 18: \"eighteen\", 19: \"nineteen\", 20: \"twenty\", 30: \"half\",\n 40: \"forty\", 45: \"quarter\", 50: \"fifty\"\n }\n\n if minutes == 0:\n return (f\"{num_to_word[hour]} o' clock\")\n elif 1 <= minutes <= 30:\n if minutes == 15 or minutes == 30:\n return (f\"{num_to_word[minutes]} past {num_to_word[hour]}\")\n if 21 <= minutes <= 29:\n last_digit = minutes - 20\n last_digit = num_to_word[last_digit]\n return (f\"{num_to_word[20]} {last_digit} minutes past {num_to_word[hour]}\")\n else:\n return (f\"{num_to_word[minutes]} minutes past {num_to_word[hour]}\")\n else:\n if minutes == 45:\n return (f\"{num_to_word[60-minutes]} to {num_to_word[hour+1]}\")\n if 31 <= minutes <= 39:\n remainder_20 = 60 - minutes\n last_digit = remainder_20 - 20\n last_digit = num_to_word[last_digit]\n return (f\"{num_to_word[20]} {last_digit} minutes to {num_to_word[hour+1]}\")\n else:\n return (f\"{num_to_word[60-minutes]} minutes to {num_to_word[hour+1]}\")", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas", "def MEAN(strArg, composList, atomDict):\n accum = 0.0\n nSoFar = 0\n for atom, num in composList:\n tStr = strArg.replace('DEADBEEF', atom)\n accum = accum + eval(tStr) * num\n nSoFar = nSoFar + num\n return accum / nSoFar", "def measure(self, 
recommender):", "def measure_number(self):\n return self._measure_number", "def calc_work_stats( work_xps: List[Dict[str, Any]] ):\n durations = [ rec['duration'] for rec in work_xps if 'duration' in rec ]\n total_years = sum( durations ) if durations else None\n avg_years = np.round( total_years / len(durations), 2) if durations else None\n poss_lt2_years = sum( 1 for dur in durations if dur < 2.0 )\n poss_lt1_2_years = sum(1 for dur in durations if dur < 1.2 )\n\n has_worked_abroad = any( rec for rec in work_xps\n if _is_location_abroad( rec.get('location_raw') ))\n\n return { \"total_experience_yrs\": total_years,\n 'avg_years': avg_years,\n 'n_work_positions': len(durations),\n 'poss_lt2_years': poss_lt2_years,\n 'poss_lt1.2_years': poss_lt1_2_years,\n 'has_worked_abroad': has_worked_abroad }\n # %%", "def calc_weight(str,dict):\n for i,c in enumerate(str):\n dict[c] += 10**(len(str)-(i+1))", "def num2words(num):\n # Create a dictionary of all unique numbers from 1 to 1,000\n num2words = {0:'', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven',\\\n 8:'eight', 9:'nine', 10:'ten', 11:'eleven', 12:'twelve', 13:'thirteen', 14:'fourteen',\\\n 15:'fifteen', 16:'sixteen', 17:'seventeen', 18:'eighteen', 19:'nineteen', 20:'twenty',\\\n 30:'thirty', 40:'forty', 50:'fifty', 60:'sixty', 70:'seventy', 80:'eighty',\\\n 90:'ninety', 1000:'onethousand'}\n result = ''\n while True:\n try:\n result += num2words[num]\n return result\n except:\n pass\n try:\n result += num2words[num-num%10] + num2words[num%10]\n return result\n except:\n result += num2words[(num - num%100)//100] + 'hundred'\n num = num%100\n if num == 0:\n return result\n else:\n result += 'and'", "def getTimes():", "def getTimes():", "def getTimes():", "def generate_wf(dataset: Dataset) -> Dict[str, int]:\n wf_dict = Counter()\n\n for item in tqdm(dataset, desc=\"Calculating word frequencies\"):\n for w in item[\"review\"].split():\n wf_dict[w] += 1\n\n return wf_dict", "def question_17(list_str: str) -> float:\n return sum(len(i) for i in list_str) / len(list_str)", "def complexity(text:str) -> float:\n words = text.split(' ')\n freqs = [frequency(w) for w in words]\n return sum(freqs) / (len(frequency_list) - freqs.count(0)) #sum of the frequencies / all the words that were in the list", "def clean(string):\n units = {'s':1, 'm':60, 'h':60*60, 'd':24*60*60, 'M':30*24*60*60}\n string = string.replace(' ','')\n p = re.compile('(\\d+)\\s*(\\w+)')\n num, unit = p.match(string).groups()\n num = float(num)\n return num * units[unit]", "def time(diners):\n if str(diners) in cache:\n return cache[str(diners)]\n if diners[0] <= 3:\n r = diners[0]\n cache[str(diners)] = r\n return r\n else:\n mintime = diners[0]\n for i in range(1, diners[0]//2+1):\n mintime = min(mintime, 1+time(sorted(diners[1:] + [diners[0]-i] + [i], key = lambda x: -x)))\n cache[str(diners)] = mintime\n return mintime\n # return min(diners[0], 1+time(sorted(diners[1:] + [diners[0]//2] + [diners[0]//2 + diners[0]%2], key = lambda x: -x)))\n # return min(\n # 1+time([max(0, x-1) for x in diners]),\n # 1+time(sorted(diners[1:] + [diners[0]//2] + [diners[0]//2 + diners[0]%2], key = lambda x: -x))\n # )", "def elapsed_time(word_time):\n return word_time[1]", "def lab_run_big(character_id, time_step):\n pass", "def usable_numbers(time):\n curr_val = time\n index = 0\n\n while index+1 < len(unit_key):\n unit_test = converter[unit_key[index+1]]\n if time // unit_test < 1:\n break\n index += 1\n\n return time//converter[unit_key[index]], unit_order[index]", "def 
run_timing():\n time_log = []\n while True:\n one_run = input(\"Enter your time for this 10 km: \")\n if not one_run:\n break\n try:\n time_log.append(float(one_run))\n except ValueError:\n print(\n \"Hey, you enter something strange, \"\n \"please enter a valid number\")\n avg_time = sum(time_log) / len(time_log)\n return f\"Your average time is about: {avg_time:.1f} \" \\\n f\"over {len(time_log)} runs\"", "def test_get_human_readable_duration():\n\n human_readable = common.get_human_readable_duration(-1)\n assert human_readable == '0 seconds'\n\n human_readable = common.get_human_readable_duration(10)\n assert human_readable == '10 seconds'\n\n human_readable = common.get_human_readable_duration(1000)\n assert human_readable == '16 minutes, 40 seconds'\n\n human_readable = common.get_human_readable_duration(10000)\n assert human_readable == '2 hours, 46 minutes, 40 seconds'", "def REC_YAHTZEE():\n return 12", "def calculate_at_content(seq):\n no_of_a = seq.count(\"A\")\n no_of_t = seq.count(\"T\")\n total = no_of_a + no_of_t\n at = total/len(seq) * 100\n \n return at", "def test_hackerrank_sample1(self):\n result = find_digits(12)\n self.assertEquals(result, 2)", "def test_sum_syllable_durations_01():\n syllables = R.main(os.path.join(\n '..', 'test', 'data', 'test_syllabics_and_melisma.xml'))[0]\n assert U.sum_syllable_durations(syllables[0]) == 0.5\n assert U.sum_syllable_durations(syllables[1]) == 1.0\n assert U.sum_syllable_durations(syllables[2]) == 2 # melismatic\n assert U.sum_syllable_durations(syllables[3]) == 0.5", "def work(char, base, scale_stat, factor):\n added = int(math.floor(char.__dict__[scale_stat] / factor))\n earned = base + added\n return [(\"gold\", earned)]", "def count_decodings(s):\n\n if len(s) == 1:\n return 1\n if len(s) == 2:\n return 2\n including_last_digit = 0\n including_last_two_digit = 0\n if int(s[-1]) > 0:\n including_last_digit = count_decodings(s[:-1])\n if int(s[-2:]) < 28:\n including_last_two_digit = count_decodings(s[:-2])\n return including_last_digit + including_last_two_digit", "def numberCounts(limit):\n\n sum = 0\n for number in range(1,limit+1):\n word = number2text(number)\n amount = countLetters(word)\n sum = sum + amount\n return sum", "def test_count_361_000(self):\n value: int = 361_000\n result: int = 187_995\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def say_chunk(num):\n output_string_list = []\n num_string = str(num)\n\n units = ['zero', 'one', 'two', 'three', 'four', 'five',\n 'six', 'seven', 'eight', 'nine']\n teens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',\n 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n tens = ['twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']\n\n # singles\n if num < 10:\n output_string_list.append(units[num])\n\n # teens\n elif 10 <= num <= 19:\n output_string_list.append(teens[int(num) % 10])\n\n # tens\n elif 20 <= num <= 99:\n num_str = str(num)\n modifier = int(num_str[0])\n if int(num_str[1]):\n output_string_list.append(\"{}-{}\".format(tens[modifier - 2], units[int(num) % 10]))\n else:\n output_string_list.append(tens[modifier - 2])\n\n # hundreds\n elif 100 <= num <= 999:\n output_string_list.append(units[int(num_string[0])])\n output_string_list.append('hundred')\n\n num = int(num_string[1:])\n if num:\n output_string_list.append('and')\n num_string = str(num)\n modifier = int(num_string[0])\n\n if int(num_string[1]):\n output_string_list.append(\"{}-{}\".format(tens[modifier - 2], 
units[int(num_string[1:]) % 10]))\n else:\n output_string_list.append(tens[modifier - 2])\n\n return ' '.join(output_string_list)", "def __test_get(fill, memap):\n x = datetime.now()\n for i in range(fill):\n memap.delete((\"yumm\" + str(i)))\n y = datetime.now()\n sec = ( (y - x).total_seconds() )\n sec = (y - x).total_seconds()\n sec_per_get = sec / fill\n micsec = sec_per_get * 1000000\n print(\"Toook an average of {} microseconds to get each item\".format(micsec))\n return micsec", "def compute_run_duration(flasher_data, initial_delay):\n if initial_delay is None:\n tot = 0\n else:\n tot = initial_delay\n\n for pair in flasher_data:\n tot += pair[1] + 10\n\n return tot", "def solveProblem048():\n # Trivial to brute force with modern hardware.\n sd = 0\n for i in range(1,1000+1):\n sd += i**i\n s = str(sd)\n s = s[-10:]\n print(s)", "def duration(examiners_data):\n max_end = 0\n for jury in examiners_data:\n examiner_number, exams = jury[\"Number\"], jury[\"Exams\"]\n for student_number, exam_time in exams.items():\n if exam_time + durations[examiner_number] > max_end:\n max_end = exam_time + durations[examiner_number]\n\n return max_end", "def test_count_361_087(self):\n value: int = 361_087\n result: int = 188_067\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def test_count_2645(self):\n value: int = 1719\n result: int = 723\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def get_week_kilometers():\n config = get_config()\n\n now = datetime.now()\n challenge_start = datetime.fromtimestamp(int(config['CHALLENGE']['start_timestamp']))\n monday_current_week = now - timedelta(days=now.weekday())\n monday_challenge_start = challenge_start - timedelta(days=challenge_start.weekday())\n\n passed_weeks = int((monday_current_week - monday_challenge_start).days / 7)\n return int(config['CHALLENGE']['start_kilometers']) + passed_weeks", "def test_count_459(self):\n value: int = 459\n result: int = 148\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def calculate_score(rack,word):\n score = 0\n\n for letter in word:\n score += SCORE_DICT[letter]\n if len(rack) < 7:\n score = score\n else:\n if len(word) >= len(rack):\n score = (score + 50)\n return score", "def get_number_events_mil(measure_table, measure_id):\n num_events = measure_table[measure_id].sum()\n return num_events, np.round(num_events / 1_000_000, 2)", "def MICRO(reported, table_report, recount, W, L):\n micro = 0\n for pw in W:\n for pl in L:\n if pw != pl:\n x = d(L[pl]) * e(pw, table_report, recount) - d(W[pw]) * e(pl, table_report, recount)\n y = (d(L[pl]) * reported[pw] - d(W[pw]) * reported[pl])\n micro = max(micro, x / y)\n\n return micro", "def test_count_1_000_000_000_000_000_000(self):\n value: int = 1_000_000_000_000_000_000\n result: int = 264160473575034274\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def calculateCal(self,string):\n\t\tglobal total\n\t\ttotal = 0\n\t\tstring = string.upper()\n\t\tlis = tokenize.sent_tokenize(string)\n\t\tfor string1 in lis:\n\t\t\tfood, qual = string1.split()\n\t\t\tfood = food.lower()\n\t\t\tqual = float(qual[:-1])\n\t\t\tif (dictfood.has_key(food)):\n\t\t\t\tcal = dictfood[food]\n\t\t\telse:\n\t\t\t\tcal = 0\n\t\t\ttotal = total + cal*qual\n\t\treturn total", "def elapsed_micros(start: int, /) -> int:", "def _unit_wk(self):\n return ((self.time_base * 60.0) 
* 24.0) * 7", "def post_process(result):\n result = result.replace(' ', '')\n\n digits = re.findall('(\\d)', result)\n words = re.findall('(\\D+)', result)\n if digits:\n if len(digits) == NUM_OF_DIGITS:\n return ''.join(digits), SUCCESS_RATE ** len(digits)\n elif len(digits) < NUM_OF_DIGITS and words:\n try:\n digits_from_words = [replace_similar_sound(word) for word in words]\n for (i, (word, digit)) in enumerate(zip(words, digits_from_words)):\n if digit:\n digits.insert(result.index(word), digit)\n result = result.replace(word, digit)\n return ''.join(digits), SUCCESS_RATE ** len(digits)\n except Exception:\n pass\n return randomize_difference(digits, NUM_OF_DIGITS - len(digits))", "def test_count_1719(self):\n value: int = 2645\n result: int = 1113\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def sub_calculate_misms(my_line, misms):\n if misms == 0.0:\n gmism = tmism = cmism = amism = str(0.0)\n elif origbase == 'A':\n amism = str(0.0)\n gmism = str(float(int(my_line[7]) + int(my_line[12])) / misms)\n tmism = str(float(int(my_line[8]) + int(my_line[13])) / misms)\n cmism = str(float(int(my_line[9]) + int(my_line[14])) / misms)\n elif origbase == 'C':\n amism = str(float(int(my_line[6]) + int(my_line[11])) / misms)\n gmism = str(float(int(my_line[7]) + int(my_line[12])) / misms)\n tmism = str(float(int(my_line[8]) + int(my_line[13])) / misms)\n cmism = str(0.0)\n elif origbase == 'G':\n amism = str(float(int(my_line[6]) + int(my_line[11])) / misms)\n gmism = str(0.0)\n tmism = str(float(int(my_line[8]) + int(my_line[13])) / misms)\n cmism = str(float(int(my_line[9]) + int(my_line[14])) / misms)\n else:\n amism = str(float(int(my_line[6]) + int(my_line[11])) / misms)\n gmism = str(float(int(my_line[7]) + int(my_line[12])) / misms)\n tmism = str(0.0)\n cmism = str(float(int(my_line[9]) + int(my_line[14])) / misms)\n return amism, cmism, gmism, tmism", "def thirteen():\r\n \r\n numbers = [37107287533902102798797998220837590246510135740250,\r\n 46376937677490009712648124896970078050417018260538,\r\n 74324986199524741059474233309513058123726617309629,\r\n 91942213363574161572522430563301811072406154908250,\r\n 23067588207539346171171980310421047513778063246676,\r\n 89261670696623633820136378418383684178734361726757,\r\n 28112879812849979408065481931592621691275889832738,\r\n 44274228917432520321923589422876796487670272189318,\r\n 47451445736001306439091167216856844588711603153276,\r\n 70386486105843025439939619828917593665686757934951,\r\n 62176457141856560629502157223196586755079324193331,\r\n 64906352462741904929101432445813822663347944758178,\r\n 92575867718337217661963751590579239728245598838407,\r\n 58203565325359399008402633568948830189458628227828,\r\n 80181199384826282014278194139940567587151170094390,\r\n 35398664372827112653829987240784473053190104293586,\r\n 86515506006295864861532075273371959191420517255829,\r\n 71693888707715466499115593487603532921714970056938,\r\n 54370070576826684624621495650076471787294438377604,\r\n 53282654108756828443191190634694037855217779295145,\r\n 36123272525000296071075082563815656710885258350721,\r\n 45876576172410976447339110607218265236877223636045,\r\n 17423706905851860660448207621209813287860733969412,\r\n 81142660418086830619328460811191061556940512689692,\r\n 51934325451728388641918047049293215058642563049483,\r\n 62467221648435076201727918039944693004732956340691,\r\n 15732444386908125794514089057706229429197107928209,\r\n 55037687525678773091862540744969844508330393682126,\r\n 
18336384825330154686196124348767681297534375946515,\r\n 80386287592878490201521685554828717201219257766954,\r\n 78182833757993103614740356856449095527097864797581,\r\n 16726320100436897842553539920931837441497806860984,\r\n 48403098129077791799088218795327364475675590848030,\r\n 87086987551392711854517078544161852424320693150332,\r\n 59959406895756536782107074926966537676326235447210,\r\n 69793950679652694742597709739166693763042633987085,\r\n 41052684708299085211399427365734116182760315001271,\r\n 65378607361501080857009149939512557028198746004375,\r\n 35829035317434717326932123578154982629742552737307,\r\n 94953759765105305946966067683156574377167401875275,\r\n 88902802571733229619176668713819931811048770190271,\r\n 25267680276078003013678680992525463401061632866526,\r\n 36270218540497705585629946580636237993140746255962,\r\n 24074486908231174977792365466257246923322810917141,\r\n 91430288197103288597806669760892938638285025333403,\r\n 34413065578016127815921815005561868836468420090470,\r\n 23053081172816430487623791969842487255036638784583,\r\n 11487696932154902810424020138335124462181441773470,\r\n 63783299490636259666498587618221225225512486764533,\r\n 67720186971698544312419572409913959008952310058822,\r\n 95548255300263520781532296796249481641953868218774,\r\n 76085327132285723110424803456124867697064507995236,\r\n 37774242535411291684276865538926205024910326572967,\r\n 23701913275725675285653248258265463092207058596522,\r\n 29798860272258331913126375147341994889534765745501,\r\n 18495701454879288984856827726077713721403798879715,\r\n 38298203783031473527721580348144513491373226651381,\r\n 34829543829199918180278916522431027392251122869539,\r\n 40957953066405232632538044100059654939159879593635,\r\n 29746152185502371307642255121183693803580388584903,\r\n 41698116222072977186158236678424689157993532961922,\r\n 62467957194401269043877107275048102390895523597457,\r\n 23189706772547915061505504953922979530901129967519,\r\n 86188088225875314529584099251203829009407770775672,\r\n 11306739708304724483816533873502340845647058077308,\r\n 82959174767140363198008187129011875491310547126581,\r\n 97623331044818386269515456334926366572897563400500,\r\n 42846280183517070527831839425882145521227251250327,\r\n 55121603546981200581762165212827652751691296897789,\r\n 32238195734329339946437501907836945765883352399886,\r\n 75506164965184775180738168837861091527357929701337,\r\n 62177842752192623401942399639168044983993173312731,\r\n 32924185707147349566916674687634660915035914677504,\r\n 99518671430235219628894890102423325116913619626622,\r\n 73267460800591547471830798392868535206946944540724,\r\n 76841822524674417161514036427982273348055556214818,\r\n 97142617910342598647204516893989422179826088076852,\r\n 87783646182799346313767754307809363333018982642090,\r\n 10848802521674670883215120185883543223812876952786,\r\n 71329612474782464538636993009049310363619763878039,\r\n 62184073572399794223406235393808339651327408011116,\r\n 66627891981488087797941876876144230030984490851411,\r\n 60661826293682836764744779239180335110989069790714,\r\n 85786944089552990653640447425576083659976645795096,\r\n 66024396409905389607120198219976047599490197230297,\r\n 64913982680032973156037120041377903785566085089252,\r\n 16730939319872750275468906903707539413042652315011,\r\n 94809377245048795150954100921645863754710598436791,\r\n 78639167021187492431995700641917969777599028300699,\r\n 15368713711936614952811305876380278410754449733078,\r\n 40789923115535562561142322423255033685442488917353,\r\n 
44889911501440648020369068063960672322193204149535,\r\n 41503128880339536053299340368006977710650566631954,\r\n 81234880673210146739058568557934581403627822703280,\r\n 82616570773948327592232845941706525094512325230608,\r\n 22918802058777319719839450180888072429661980811197,\r\n 77158542502016545090413245809786882778948721859617,\r\n 72107838435069186155435662884062257473692284509516,\r\n 20849603980134001723930671666823555245252804609722,\r\n 53503534226472524250874054075591789781264330331690]\r\n \r\n sum = 0\r\n \r\n for n in numbers:\r\n sum += n\r\n \r\n return int(str(sum)[:10])", "def get_number(word, i_type='S'):\n\n resultdict = {}\n if word is None:\n return resultdict\n\n word = str(word)\n regexStr = None\n if i_type == 'S':\n regexStr = re.search(r'^[0-9\\-]+', word)\n else:\n regexStr = re.search(r'[0-9\\-]+', word)\n\n if regexStr is not None:\n # pdb.set_trace()\n numList = []\n if '-' in word:\n numList = word.split('-')\n else:\n numList.append(word)\n\n for idx, numWord in enumerate(numList):\n if idx > 1:\n resultdict = {}\n break\n \"\"\"\n Let's get number and suffix for number1\n and number2\n \"\"\"\n # to get the number\n regexNum = re.search(r'[0-9]+', numWord)\n key = 'number_' + str(idx + 1)\n if regexNum is not None:\n try:\n resultdict[key] = int(regexNum.group().split(' ')[0])\n except:\n pass\n # resultdict[key] = regexNum.group().split(' ')[0]\n\n # to get suffix\n regexSuff = re.search(r'[a-zA-Z]+', numWord)\n key = key + '_suff'\n if regexSuff:\n # resultdict[key] = regexSuff.group().split(' ')[0]\n \"\"\"\n dont think we should have suffix more than 1\n character\n there are few cases but we are ignoring them...\n \"\"\"\n suff = regexSuff.group().split(' ')[0]\n if i_type == 'S':\n if len(suff) == 1:\n resultdict[key] = suff\n else:\n resultdict = {}\n else:\n if len(suff) < 3:\n resultdict[key] = suff\n\n return resultdict", "def test_hackerrank_sample2(self):\n result = find_digits(1012)\n self.assertEquals(result, 3)", "def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)", "def complexity(s, **kwargs):\n num, den = 1, 1\n for k in range(1, len(s)):\n k4 = 4**k # For DNA\n num += min(len(set(s[i:i+k] for i in range(len(s) - k + 1))), k4)\n den += min(len(s) - k + 1, k4)\n return num / den", "def readability_measurements(passage: str):\n results = readability.getmeasures(passage, lang='en')\n \n chars_per_word = results['sentence info']['characters_per_word']\n syll_per_word = results['sentence info']['syll_per_word']\n words_per_sent = results['sentence info']['words_per_sentence']\n \n kincaid = results['readability grades']['Kincaid']\n ari = results['readability grades']['ARI']\n coleman_liau = results['readability grades']['Coleman-Liau']\n flesch = results['readability grades']['FleschReadingEase']\n gunning_fog = results['readability grades']['GunningFogIndex']\n lix = results['readability grades']['LIX']\n smog = results['readability grades']['SMOGIndex']\n rix = results['readability grades']['RIX']\n dale_chall = results['readability grades']['DaleChallIndex']\n \n tobeverb = results['word usage']['tobeverb']\n auxverb = results['word usage']['auxverb']\n conjunction = results['word usage']['conjunction']\n pronoun = results['word 
usage']['pronoun']\n preposition = results['word usage']['preposition']\n nominalization = results['word usage']['nominalization']\n \n pronoun_b = results['sentence beginnings']['pronoun']\n interrogative = results['sentence beginnings']['interrogative']\n article = results['sentence beginnings']['article']\n subordination = results['sentence beginnings']['subordination']\n conjunction_b = results['sentence beginnings']['conjunction']\n preposition_b = results['sentence beginnings']['preposition']\n \n return [chars_per_word, syll_per_word, words_per_sent,\n kincaid, ari, coleman_liau, flesch, gunning_fog, lix, smog, rix, dale_chall,\n tobeverb, auxverb, conjunction, pronoun, preposition, nominalization,\n pronoun_b, interrogative, article, subordination, conjunction_b, preposition_b]", "def main() -> None:\n numbers = []\n with open('./inputs/day01.txt') as infile:\n for line in infile.readlines():\n numbers.append(int(line))\n\n match = find_match(numbers, 2020)\n print(match, reduce(lambda x, y: x * y, match))\n match = find_match(numbers, 2020, 3)\n print(match, reduce(lambda x, y: x * y, match))", "def test_calculate_working_days():\n assert (\n calculate_working_days(parse('2020-01-01'), parse('2020-03-31')) == 64\n )", "def metric(x,y):\n sm = x + y\n df = x - y\n div = sm / df if df != 0 else 0\n return \"sum is %s \" %sm, \"difference is %s \" %df, \"division of difference to sum is %s\" %div", "def s_words(words):\n\t\n\treturn words // 100 / 10", "def test_count_6_645_243(self):\n value: int = 6_645_243\n result: int = 3_615_948\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def calc_mileage(num_weeks, days_first_week, days_last_week):\r\n initial_mileage = get_mileage(\"How many miles per week would you like to \"\r\n \"run at the start of your training?\",\r\n \"Initial Miles Per Week\", 1, 70,\r\n \" per week to start\")\r\n final_mileage = get_mileage(\"How many miles per week would you like to run \"\r\n \"by the end of your training?\", \"Final Miles \"\r\n \"Per Week\", max(30, initial_mileage)\r\n , 100, \" per week at the end\")\r\n weekly_mileage = []\r\n if num_weeks == 2:\r\n weekly_mileage += [round(final_mileage * 0.75 * days_first_week / 7)]\r\n elif num_weeks == 3:\r\n weekly_mileage += [round(final_mileage * (days_first_week / 7))]\r\n weekly_mileage += [round(final_mileage * 0.75)]\r\n elif num_weeks == 4:\r\n weekly_mileage += [round(initial_mileage * (days_first_week / 7))]\r\n weekly_mileage.append(final_mileage)\r\n weekly_mileage += [round(final_mileage * 0.75)]\r\n elif num_weeks > 4:\r\n if days_first_week < 7:\r\n weekly_mileage += [round(initial_mileage * (days_first_week / 7))]\r\n num_weeks -= 1\r\n weekly_mileage += [round(initial_mileage + (i/(num_weeks - 3)) *\r\n (final_mileage - initial_mileage)) for i in range(\r\n num_weeks - 2)]\r\n weekly_mileage += [round(final_mileage * 0.75)]\r\n weekly_mileage += [round(final_mileage / 2 * ((days_last_week - 1) / 7))]\r\n return (initial_mileage, final_mileage, weekly_mileage)", "def bruteForceTime():\n start_time = time.time()\n subjects = loadSubjects(SUBJECT_FILENAME)\n maxWork = 8\n answer = bruteForceAdvisor(subjects, maxWork)\n end_time = time.time()\n printSubjects(answer)\n print 'Time taken: ', end_time - start_time\n return None", "def AllindividualRuns():\n #800 nm\n RunData(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'), out='I800nm')\n RunData(getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), 
folder='data/31Jul/'), out='I800nm5k')\n RunData(getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/'), out='I800nm10k')\n RunData(getFiles(mintime=(15, 43, 24), maxtime=(15, 51, 47), folder='data/31Jul/'), out='I800nm20k')\n RunData(getFiles(mintime=(15, 56, 11), maxtime=(16, 02, 58), folder='data/31Jul/'), out='I800nm30k')\n RunData(getFiles(mintime=(16, 12, 39), maxtime=(16, 18, 25), folder='data/31Jul/'), out='I800nm38k')\n RunData(getFiles(mintime=(16, 21, 52), maxtime=(16, 26, 16), folder='data/31Jul/'), out='I800nm50k')\n RunData(getFiles(mintime=(16, 32, 02), maxtime=(16, 35, 23), folder='data/31Jul/'), out='I800nm54k')\n #700 nm\n RunData(getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'), out='I700nm5k')\n RunData(getFiles(mintime=(17, 37, 35), maxtime=(17, 46, 51), folder='data/30Jul/'), out='I700nm9k')\n RunData(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'), out='I700nm52k')\n RunData(getFiles(mintime=(17, 58, 18), maxtime=(17, 59, 31), folder='data/30Jul/'), out='I700nm32k')\n #600 nm\n RunData(getFiles(mintime=(15, 22, 00), maxtime=(15, 36, 32), folder='data/30Jul/'), out='I600nm5k')\n RunData(getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/'), out='I600nm54k')\n RunData(getFiles(mintime=(15, 52, 07), maxtime=(16, 06, 32), folder='data/30Jul/'), out='I600nm10k')\n #890 nm\n RunData(getFiles(mintime=(13, 37, 37), maxtime=(13, 50, 58), folder='data/01Aug/'), out='I890nm5k')\n RunData(getFiles(mintime=(14, 00, 58), maxtime=(14, 11, 54), folder='data/01Aug/'), out='I890nm10k')\n RunData(getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/'), out='I890nm30k')\n RunData(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'), out='I890nm50k')", "def get_tot_th(x):\n\ttot_count = 0\n\tth_count = 0\n\tfor y in x:\n\t\ttestval = y.split(':')\n\t\ttestval = testval[0][-1]\n\t\tif testval == \"9\":\n\t\t\ttot_count += 1\n\t\telif testval == \"1\":\n\t\t\tth_count +=1\n\t\telse:\n\t\t\tcontinue\n\treturn tot_count,th_count", "def get_reading_length(self):\n body_text_length= len(self.body_text.split(' ')) # split the body_text into words (using whitespace as the delimiter) and count how many words there are \n reading_length = 'Reading time: ' + str(round(body_text_length / 200)) + ' mins' #apparently, studies have shown that 238 words per minute is the average reading speed, but I'll round it down to 200\n #using round() to round the number to an integer, discarding any decimal digits\n\n return reading_length", "def test_count_361_080(self):\n value: int = 361_080\n result: int = 188_067\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def count_all_tallies_and_times(contents):\n dates_tallies = convert_tallies_and_times(contents)\n total_tallies = []\n for dates, tallies in dates_tallies.items():\n tallies = tallies.split(' ')\n for tally in tallies:\n if tally == '':\n continue\n else:\n total_tallies.append(int(tally))\n return total_tallies", "def __get_number_of_day(units):\n multiplier = DAYS_IN_A_YEAR\n if units:\n if units.lower().startswith(\"w\"):\n multiplier = DAYS_IN_A_WEEK\n elif units.lower().startswith(\"m\"):\n multiplier = DAYS_IN_A_MONTH\n return multiplier", "def get_memory(soup):\n memory = soup.find_all(\"span\", {\"class\": \"specs-brief-accent\"})\n if len(memory):\n memory = re.findall(r'\\d+', memory[3].get_text())\n if len(memory):\n memory = int(memory[0])\n else:\n 
memory = 0\n else:\n memory = 0\n return memory", "def at_content(seq):\n result = float(str(seq).count('A') + str(seq).count('T'))/len(seq) *100\n return result", "def test_count_361_077(self):\n value: int = 361_077\n result: int = 188_065\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def twenty_seventeen():\n return (2 * 8 + 1) + 2 * 5 * 5 * 2 * 2 * 5 * 2", "def test_count_7000(self):\n value: int = 7000\n result: int = 3333\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def test_count_361_070(self):\n value: int = 361_070\n result: int = 188_058\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def addition_homework(data: Iterator[str]) -> int:\n n = final_sum(data)\n return n.magnitude" ]
[ "0.57274574", "0.5691518", "0.5691518", "0.5494114", "0.5486369", "0.5464641", "0.54049367", "0.53898066", "0.5387856", "0.53810817", "0.5366881", "0.5366617", "0.5352287", "0.53466666", "0.53197426", "0.52943444", "0.5284739", "0.5271887", "0.5271256", "0.5255226", "0.52428466", "0.5236887", "0.5231502", "0.52290237", "0.5205058", "0.5204184", "0.5189072", "0.518633", "0.51601714", "0.51547813", "0.5154229", "0.5138166", "0.5124665", "0.5120955", "0.5120411", "0.5120411", "0.5120411", "0.5112026", "0.51092964", "0.5107589", "0.5104303", "0.5103836", "0.5102235", "0.51006943", "0.5092867", "0.5089539", "0.50863695", "0.50845563", "0.5068713", "0.50654924", "0.50623286", "0.5042604", "0.5039977", "0.5028675", "0.5019994", "0.5014003", "0.5013533", "0.50126266", "0.5003435", "0.4999244", "0.4988357", "0.49696603", "0.4965911", "0.49605158", "0.49449643", "0.4944686", "0.49421978", "0.4940288", "0.49392703", "0.49383345", "0.49352342", "0.49320382", "0.4930824", "0.49257478", "0.49256113", "0.49254867", "0.4917147", "0.49159175", "0.49154353", "0.4915042", "0.49085844", "0.49050957", "0.48940474", "0.4875571", "0.4874203", "0.48723707", "0.48720872", "0.48691848", "0.48647767", "0.48623243", "0.48622656", "0.48591053", "0.48581585", "0.48536262", "0.484868", "0.4847584", "0.48453796", "0.48406687", "0.48377392", "0.4831886" ]
0.50360847
53
Initialization of the mpc controller. Most of the parameters are fixed as this depends on the arm we have.
def __init__(self, ACTION_DIM = 4): # TODO: There is probably a more elegant way of doing this # load neural network params self.start = 0. if ACTION_DIM == 4: self.ACTION_DIM = ACTION_DIM else: print('you are doing something wrong') exit() # number of degress of freedom self.ndegres = 4 self.state_dim = 2*self.ndegres # the horizon is hard coded self.horizon = 5 # bounds # we start by a defining the bounds for each of the control inputs self.lb = -8. # -220. self.up = 8. # 190. self.current_lb = -250. self.current_up = 250. # initial action self.action = np.ones(self.ACTION_DIM) # for the optimization problem we use a chain of actions # the initial estimate is zeros self.h_action = np.zeros(self.ACTION_DIM*self.horizon) self.start_time = [] self._x = [] self.pos_buffer = [] self.vel_buffer =[] self.u_buffer = [] self.error = np.zeros((3,4)) # desired trajectory # TODO: This has to be moved to a subscriber so that the planner can send the commands self.waypoints = ([[0., 0., 0., 0., 0, 0., 0., 0. ]]) # ([[.1, 0., 0.05, 0., 0.5, 0., 0.2, 0. ], [.0, 0., 0., 0., 0.5, 0., 0.2, 0.]]) self.flag = 0 self.count = 0 # ref is just a single command self.ref = self.waypoints[self.flag] # this takes into consideration the h_ref over the horizon self.h_ref = np.array([self.ref for _ in range(self.horizon)]) #TODO: subscribers self.nav_rate = 15 self.dt = 1.0 / self.nav_rate self.timeout = 0.99*(1.0 / self.nav_rate) # it should time out earlier than 0.05 to keep the rate self.r_loop = rospy.Rate(self.nav_rate) self.position = np.zeros(self.ndegres) self.unprocessded_position = np.zeros(self.ndegres) self.velocity = np.zeros(self.ndegres) self.state = np.zeros(self.ndegres) self.current_command = np.zeros(self.ndegres) # Publishers # we need integration with the arm for the tau #self.pub_tau = rospy.Publisher('/r5m_0/cmd_tau', Float32MultiArray, queue_size = 10) #self.pub_vel = rospy.Publisher('/r5m_0/cmd_velocity', Float32MultiArray, queue_size = 10) #self.pub_pos = rospy.Publisher('/r5m_0/cmd_position', Float32MultiArray, queue_size = 10) self.cmd_current_pub = rospy.Publisher('r5m_0/cmd_current', single_float, queue_size=10) # Subscribers self.sub_vel = rospy.Subscriber('/r5m_0/rec_velocity', Float32MultiArray, self.get_velocity, tcp_nodelay=True, queue_size=10) # self.sub_vel = rospy.Subscriber('/r5m_0/velocity', single_float, self.get_velocity_arm, tcp_nodelay=True, queue_size=10) # self.sub_pos = rospy.Subscriber('/r5m_0/rec_position', Float32MultiArray, self.get_position, tcp_nodelay=True, queue_size=10) self.sub_pln = rospy.Subscriber('/r5m_0/plan', RobotTrajectory, self.get_plan, tcp_nodelay=True, queue_size=10) self.sub_pos = rospy.Subscriber('/r5m_0/position', single_float, self.get_position_arm, tcp_nodelay=True, queue_size=10) # service self.req_joint_plan = rospy.ServiceProxy('/r5m_0/joint_plan_req', Empty) self.req_plan = rospy.ServiceProxy('/r5m_0/plan_req', Empty)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_controller(self, mode, p=None, d=None, torque=None):\n self.set_mode(Mode[mode])\n\n if p is not None:\n self.kp = p\n elif Mode[mode] == Mode.JOINT_IMP_CTRL:\n self.kp = [32000, 32000, 32000, 32000, 32000, 32000, 32000]\n elif Mode[mode] == Mode.CART_IMP_CTRL:\n self.kp = [2000, 2000, 2000, 20, 20, 20, None]\n # self.kp = [0.0, 0.00, 0.00, 0.00, 0.00, 0.00, None]\n #self.kp = [2, 2, 0.9, 0.2, 0.2, 0.2] + [None]\n #self.kp = [0.2, 0.00, 0.00, 0.00, 0.00, 0.00, None]\n\n if d is not None:\n self.kd = d\n elif Mode[mode] == Mode.JOINT_IMP_CTRL:\n self.kd = [15, 15, 15, 15, 15, 15, 15]\n elif Mode[mode] == Mode.CART_IMP_CTRL:\n self.kd = [0.7, 0.7, 0.7, 0.7, 0.7, 0.7, None]\n #self.kd = [0.005]*6 + [None]\n\n if torque is not None:\n self.torque = torque\n else:\n self.torque = np.zeros(7)", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def initialize_multiprocessing(self):\n if self.multiprocessing_controller is not None:\n MPControl.set_multiprocess_engine(self.multiprocessing_controller)\n MPControl.connect()", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def init(self):\n self.AOMBoxConnection = pyArdDAC.ARD_DAC(HOST=self.AOMBox_IP, PORT=8888, DEBUG=False)#connects to arduino in High frequency Na AOM box\n #channel number should be defined in subclass\n self.INTEGER_MIN = 0\n self.INTEGER_MAX = 65535\n self.VOLTAGE_MIN = 0.0\n self.VOLTAGE_MAX = 5.0\n self.initialised=True\n return \"%s init successful\" % self.hardwareActionName", "def pwm_controller_init(self, chain: machine.I2C = None, freq: int = 333):\n if self.antenny_config.get(\"use_motor\"):\n print(\"use_motor found in config: {}\".format(self.antenny_config.get_name()))\n if chain is None:\n i2c_pwm_controller_scl = self.antenny_config.get(\"i2c_pwm_controller_scl\")\n i2c_pwm_controller_sda = self.antenny_config.get(\"i2c_pwm_controller_sda\")\n self.i2c_pwm_controller = self.i2c_init(\n 0,\n i2c_pwm_controller_scl,\n i2c_pwm_controller_sda\n )\n else:\n self.i2c_pwm_controller = chain\n 
pwm_controller = Pca9685Controller(self.i2c_pwm_controller, freq=freq)\n print(\"Motor connected\")\n safe_mode = False\n else:\n pwm_controller = MockPWMController()\n print(\"According to your config, you do not have a motor connected, entering Safe Mode\")\n safe_mode = True\n self.pwm_controller = pwm_controller\n self.safe_mode = safe_mode\n return pwm_controller, safe_mode", "def __init__(self):\r\n\r\n #480p 2.39:1 720x302\r\n #2048x2048 is more than 7.3GB of vRAM for the Master DISC model\r\n\r\n #Loading the preprocessed data\r\n preprocessVars = Preprocess()\r\n\r\n #The training and display of the trained models\r\n self.modelTrain = train.Train(preprocessVars)\r\n self.disp = display.Display(preprocessVars)", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "def on_init(self):\n self.controller = gameController.Controller()", "def initialize(self):\n self.initilize_multiply_array() # m\n self.initialize_cameras()\n self.initialize_electronics()\n self.logger.info('Starting free runs and continuous reads')\n self.camera_microscope.start_free_run()\n self.camera_microscope.continuous_reads()\n self.camera_fiber.start_free_run()\n self.camera_fiber.continuous_reads()\n self.servo_off()\n\n time.sleep(1) #m Without the sleep below initialize_multiply_array does not work", "def __init__(self, controller_type, namespace):\n assert controller_type in [\"position\", \"velocity\", \"effort\"]\n rospy.Subscriber(namespace + controller_type + \"_controller/command\",\n Float64MultiArray, self._pve_control_cb, queue_size=1)\n self._cmd = 0.0\n self._data_available = False", "def robotInit(self):\n\n #Initialize Networktables\n self.sd = NetworkTables.getTable('SmartDashboard')\n\n \n #Set up motors to drive robot\n self.M2 = wpilib.VictorSP(2)\n self.M3 = wpilib.VictorSP(3)\n #self.M2.setInverted(True)\n #self.M3.setInverted(True)\n self.left = wpilib.SpeedControllerGroup(self.M2,self.M3)\n \n self.M0 = wpilib.VictorSP(0)\n self.M1 = wpilib.VictorSP(1)\n self.right = wpilib.SpeedControllerGroup(self.M0,self.M1)\n self.drive = wpilib.drive.DifferentialDrive(self.left, self.right)\n \n \n self.stick = wpilib.Joystick(1)\n self.timer = wpilib.Timer()\n #Camera\n wpilib.CameraServer.launch()\n #Servo\n self.SV1 = wpilib.Servo(9)\n self.SV2 = wpilib.Servo(8) \n #Dashboard\n NetworkTables.initialize(server='10.61.62.2')\n #Switches\n self.SW0 = wpilib.DigitalInput(0)\n self.SW1 = wpilib.DigitalInput(1)\n #Elevator\n self.E = wpilib.VictorSP(5)\n self.prepareCubeFlag = 0\n self.grabCubeFlag = 0\n self.deliverCubeFlag = 0\n self.adjustLeftFlag=0\n self.adjustRightFlag=0\n self.driveFlag=0\n #Gyro\n self.gyro = wpilib.ADXRS450_Gyro(0)\n 
self.gyro.reset()\n #All possible autonomous routines in a sendable chooser\n '''\n self.chooser = wpilib.SendableChooser()\n self.chooser.addDefault(\"None\", '4')\n self.chooser.addObject(\"left-LeftScale\", '1')\n self.chooser.addObject(\"Middle-LeftScale\", '2')\n self.chooser.addObject(\"Right-LeftScale\", '3')\n self.chooser.addObject(\"Left-RightScale\", '5')\n '''\n #wpilib.SmartDashboard.putData('Choice', self.chooser)\n #Encoders\n self.EC1 = wpilib.Encoder(2,3)\n self.EC2 = wpilib.Encoder(4,5)\n self.EC1.reset()\n self.EC2.reset()", "def __init__ (self) :\n self.loadCSPAD2x2CalibParsDefault()", "def __init__(self, compute_driver=None, *args, **kwargs):\n self.network_api = network.API()\n self.virtapi = ComputeVirtAPI(self)\n self.driver = driver.load_compute_driver(self.virtapi, compute_driver)\n self._resource_tracker_dict = {}\n self._sync_power_pool = eventlet.GreenPool()\n self._syncs_in_progress = {}\n\n super(ControllerManager, self).__init__(service_name=\"controller\", *args, **kwargs)", "def initialize_electronics(self):\n\n self.electronics = ArduinoModel(**self.config['electronics']['arduino'])\n self.logger.info('Initializing electronics arduino')\n self.electronics.initialize()", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.sensor_manager = SensorManager.instance()\n\n self.pwm = Adafruit_PCA9685.PCA9685(address=0x40, busnum=1) # create PCA9685-object at I2C-port\n self.pwm.set_pwm_freq(50)\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(20, GPIO.OUT)\n GPIO.setup(21, GPIO.OUT)\n GPIO.setup(26, GPIO.OUT)\n self.driven_distance = 0", "def _init_hardware(self):\n return", "def _initialize(self):\n self.send_init_command()", "def platform_init(self):\n if isinstance(self.imu, MockImuController) or isinstance(self.pwm_controller, MockPWMController):\n print(\"Mock components detected, creating mock antenna controller\")\n platform = MockPlatformController(self.azimuth_servo, self.elevation_servo, self.imu)\n else:\n print(\"Initializing PIDAntennaController class\")\n platform = PIDPlatformController(\n self.azimuth_servo,\n self.elevation_servo,\n self.imu,\n pid_output_limits=self.pid_config.get(\"output_limits\"),\n pid_frequency=self.pid_config.get(\"period\"),\n p=self.pid_config.get(\"p\"),\n i=self.pid_config.get(\"i\"),\n d=self.pid_config.get(\"d\")\n )\n \n self.platform = platform\n\n if not isinstance(self.gps, MockGPSController):\n self.gps_update_loop = GPSLocationController(self.gps)\n self.gps_update_loop.start()\n else:\n self.gps_update_loop = None\n \n return platform", "def initialise(self):\n self.sc.init.exec_action(self.variables)", "def init(self):\n self.IP_ADDRESS = \"192.168.16.55\"\n self.PORT = 8888\n self.windFreakConnection = windFreakClient.ConnectionConstantFrequency(IP_ADDRESS=self.IP_ADDRESS, port=self.PORT) \n self.initialised=True\n return \"%s init successful\" % self.hardwareActionName", "def load_params(self, event):\n \n self.robot_type = rospy.get_param(\"robot_type\" , 'pendulum' )\n self.robot_config = rospy.get_param(\"robot_config\", 'wrist-only' )\n self.robot_ctl = rospy.get_param(\"controller\", 'RfixCTC' )\n self.fixed_mode = rospy.get_param(\"fixed_mode\", 1 )\n \n \n ###############################################\n # Load robot model for the right configuration\n if self.robot_config == 'wrist-only':\n self.R = Proto.SingleRevoluteDSDM()\n \n elif self.robot_config == 'dual-plane' :\n self.R = Proto.TwoPlanarSerialDSDM()\n \n else:\n 
self.R = None\n \n ###############################################\n # Load controller\n if self.robot_ctl == 'RfixCTC' :\n self.Ctl = RminCTC.RfixComputedTorqueController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminCTC' :\n self.Ctl = RminCTC.RminComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RfixSLD' :\n self.Ctl = RminCTC.RfixSlidingModeController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminSLD' :\n self.Ctl = RminCTC.RminSlidingModeController( self.R )\n \n elif self.robot_ctl == 'RollCTC' :\n self.Ctl = RollCTC.RolloutComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl = RollCTC.RolloutSlidingModeController( self.R )\n \n else:\n self.Ctl = None\n \n \n if self.robot_config == 'wrist-only':\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 2 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0,0] ) )\n \n elif self.robot_config == 'dual-plane' :\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 4 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0.0,0.0,0.0,0.0] ) )\n #self.x_d = np.array( [-3.14 , 0 , 0 , 0] )\n \n # Gen ctl params\n self.Ctl.hysteresis = rospy.get_param(\"hysteresis\", True )\n self.Ctl.min_delay = rospy.get_param(\"min_delay\", 0.5 )\n \n self.Ctl.w0 = rospy.get_param(\"w0\", 1 )\n self.Ctl.zeta = rospy.get_param(\"zeta\", 0.7 )\n \n self.Ctl.lam = rospy.get_param(\"lam\", 1 )\n self.Ctl.nab = rospy.get_param(\"nab\", 1 )\n self.Ctl.D = rospy.get_param(\"D\", 0 )\n \n self.Ctl.horizon = rospy.get_param(\"horizon\", 0.5 )\n self.Ctl.sim_dt = rospy.get_param(\"sim_dt\", 0.1 )\n \n self.Ctl.domain_check = rospy.get_param(\"domain_check\", False )\n \n # Base policy param for roll \n if self.robot_ctl == 'RollCTC' :\n self.Ctl.FixCtl.lam = self.Ctl.lam\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl.FixCtl.lam = self.Ctl.lam \n self.Ctl.FixCtl.nab = self.Ctl.nab \n self.Ctl.FixCtl.D = self.Ctl.D", "def _initialize_variables(\n self,\n model,\n assembler=None,\n mat=None,\n pc=None,\n gmres=None,\n struct_id=None,\n thermal_index=0,\n ):\n\n self.thermal_index = thermal_index\n self.struct_id = struct_id\n\n # Boolean indicating whether TACSAssembler is on this processor\n # or not. 
If not, all variables are None.\n self.tacs_proc = False\n\n # Assembler object\n self.assembler = None\n\n # TACS vectors\n self.res = None\n self.ans = None\n self.ext_force = None\n self.update = None\n\n # Matrix, preconditioner and solver method\n self.mat = None\n self.pc = None\n self.gmres = None\n\n # TACS node locations\n self.struct_X = None\n\n self.vol = 1.0\n\n if assembler is not None:\n # Set the assembler\n self.assembler = assembler\n self.tacs_proc = True\n\n # Create the scenario-independent solution data\n self.res = self.assembler.createVec()\n self.ans = self.assembler.createVec()\n self.ext_force = self.assembler.createVec()\n self.update = self.assembler.createVec()\n\n # Allocate the nodal vector\n self.struct_X = assembler.createNodeVec()\n self.assembler.getNodes(self.struct_X)\n\n # required for AverageTemp function, not sure if needed on\n # body level\n self.vol = 1.0\n\n # Allocate the different solver pieces - the\n self.mat = mat\n self.pc = pc\n self.gmres = gmres\n\n if mat is None:\n self.mat = assembler.createSchurMat()\n self.pc = TACS.Pc(self.mat)\n self.gmres = TACS.KSM(self.mat, self.pc, 30)\n elif pc is None:\n self.mat = mat\n self.pc = TACS.Pc(self.mat)\n self.gmres = TACS.KSM(self.mat, self.pc, 30)\n elif gmres is None:\n self.mat = mat\n self.pc = pc\n self.gmres = TACS.KSM(self.mat, self.pc, 30)\n\n # Allocate the scenario data\n self.scenario_data = {}\n for scenario in model.scenarios:\n func_list, func_tags = self._allocate_functions(scenario)\n self.scenario_data[scenario] = self.ScenarioData(\n self.assembler, func_list, func_tags\n )\n\n return", "def Init(self):\n RobotMap.Init()\n from commands import *\n from subsystems import *\n#@autogenerated_code(\"constructors\", \" \")\n#parse(\"${exporter-path}core/robot-constructors.py\")\n#end\n # This MUST be here. If the OI creates Commands (which it very likely\n # will), constructing it during the construction of CommandBase (from\n # which commands extend), subsystems are not guaranteed to be\n # yet. Thus, their requires() statements may grab null pointers. Bad\n # news. Don't move it.\n self.oi = OI()\n\n # instantiate the command used for the autonomous period", "def __init__(self):\n self._max_sim_time_reached = False\n self._max_wall_time_reached = False\n self._behavior_finished = False\n self._flexbe_status_subscriber = None\n\n self._mission_finalizers = \"\"\n self._mission_sim_time_in_sec = 0\n self._finalizer_functions = []\n\n self.read_ros_params()\n CiLog.info(\"Init of SimulationControl constructor finished.\")", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def prepare(self):\n super(Test200SmartSanityClear005, self).prepare()\n\n self.logger.info('Preconditions:')\n self.logger.info('1. Open Micro/WINr; ')\n self.logger.info('2. Set up connection with PLC;')\n self.logger.info('3. 
Download a project which has OB,DB,SDB;')\n self.MicroWIN.test_prepare('reset_factory_01.smart', False)\n # set cpu mode to run\n self.PLC['1'].set_plc_mode(1)\n self.memory_options = self.PLC['1'].find('memory_options')\n # force some value\n self.memory_options.force('v', 'byte', 0, value=self.force_value)\n time.sleep(5)\n self.PLC['1'].set_plc_mode(0)", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def prepareController(self):\n pass", "def _set_controller_parameters(self, P=None, I=None, D=None):\n pass", "def __init__(self):\n parameters_list = []\n self.config_dict = self.open_config(parameters_list)\n\n # Define defaults\n self.disc_gt = 0.0\n self.disc_out = 0.0", "def __init__(self):\n\n # Diccionario que contendra todas las fuentes para ir llamandolas una por una en ejecucion\n # o poder seleccionar cual lanzar usando el patron factoria a traves de esta clase\n\n self.controller_objects = {'iptables': IptablesController}", "def __init__(self):\n # Call parent initialisers\n # SecmUtilityCore.__init__(self)\n Node.__init__(self, \"vehicle_sim\")\n # super().__init__('vehicle_sim')\n\n self.vehicle_marker_array = MarkerArray()\n self.vehicle_marker = Marker()\n self.pose_msg = Pose()\n self.control_msg = Control()\n\n self.model = Model()\n\n # Create subscribers to listen to SECM output\n self.create_subscription(\n msg_type=Control,\n topic=\"/control\",\n callback=self.receive_control_msg,\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Create pose publisher\n self.pose_publisher = self.create_publisher(\n msg_type=Pose,\n topic=\"/pose\",\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Create marker publisher\n self.vehicle_marker_publisher = self.create_publisher(\n msg_type=Marker,\n topic=\"/vehicle_marker\",\n qos_profile=BEST_EFFORT_QOS_PROFILE\n )\n\n # Setup timers to spin the execution loop. 
\n self.create_timer(1.0/30.0, self.execute)", "def _setup(self):\n\n from AlGDock.topology import Topology\n self.top = Topology(self.args)\n self.top_RL = Topology(self.args, includeReceptor=True)\n\n # Initialize rmsd calculation function\n from AlGDock.RMSD import hRMSD\n self.get_rmsds = hRMSD(self.args.FNs['prmtop']['L'], \\\n self.top.inv_prmtop_atom_order_L)\n\n # Obtain reference pose\n if self.data['CD'].pose > -1:\n if ('starting_poses' in self.data['CD'].confs.keys()) and \\\n (self.data['CD'].confs['starting_poses'] is not None):\n starting_pose = np.copy(self.data['CD'].confs['starting_poses'][0])\n else:\n (confs, Es) = self._get_confs_to_rescore(site=False, \\\n minimize=False, sort=False)\n if self.args.params['CD']['pose'] < len(confs):\n starting_pose = np.copy(confs[self.args.params['CD']['pose']])\n self.data['CD'].confs['starting_poses'] = [np.copy(starting_pose)]\n else:\n self._clear('CD')\n self._store_infinite_f_RL()\n raise Exception('Pose index greater than number of poses')\n else:\n starting_pose = None\n\n from AlGDock.system import System\n self.system = System(self.args,\n self.log,\n self.top,\n self.top_RL,\n starting_pose=starting_pose)\n\n # Measure the binding site\n if (self.args.params['CD']['site'] == 'Measure'):\n self.args.params['CD']['site'] = 'Sphere'\n if self.args.params['CD']['site_measured'] is not None:\n (self.args.params['CD']['site_max_R'],self.args.params['CD']['site_center']) = \\\n self.args.params['CD']['site_measured']\n else:\n print '\\n*** Measuring the binding site ***'\n self.system.setParams(\n self.system.paramsFromAlpha(1.0, 'CD', site=False))\n (confs, Es) = self._get_confs_to_rescore(site=False, minimize=True)\n if len(confs) > 0:\n # Use the center of mass for configurations\n # within 20 RT of the lowest energy\n cutoffE = Es['total'][-1] + 20 * (R * self.T)\n coms = []\n for (conf, E) in reversed(zip(confs, Es['total'])):\n if E <= cutoffE:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, conf))\n coms.append(np.array(self.top.universe.centerOfMass()))\n else:\n break\n print ' %d configurations fit in the binding site' % len(coms)\n coms = np.array(coms)\n center = (np.min(coms, 0) + np.max(coms, 0)) / 2\n max_R = max(\n np.ceil(np.max(np.sqrt(np.sum(\n (coms - center)**2, 1))) * 10.) 
/ 10., 0.6)\n self.args.params['CD']['site_max_R'] = max_R\n self.args.params['CD']['site_center'] = center\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, confs[-1]))\n if ((self.args.params['CD']['site_max_R'] is None) or \\\n (self.args.params['CD']['site_center'] is None)):\n raise Exception('No binding site parameters!')\n else:\n self.args.params['CD']['site_measured'] = \\\n (self.args.params['CD']['site_max_R'], \\\n self.args.params['CD']['site_center'])\n\n # Read the reference ligand and receptor coordinates\n import AlGDock.IO\n IO_crd = AlGDock.IO.crd()\n if self.args.FNs['inpcrd']['R'] is not None:\n if os.path.isfile(self.args.FNs['inpcrd']['L']):\n lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)\n self.data['CD'].confs['receptor'] = IO_crd.read(\\\n self.args.FNs['inpcrd']['R'], multiplier=0.1)\n elif self.args.FNs['inpcrd']['RL'] is not None:\n complex_crd = IO_crd.read(self.args.FNs['inpcrd']['RL'], multiplier=0.1)\n lig_crd = complex_crd[self.top_RL.L_first_atom:self.top_RL.L_first_atom + \\\n self.top.universe.numberOfAtoms(),:]\n self.data['CD'].confs['receptor'] = np.vstack(\\\n (complex_crd[:self.top_RL.L_first_atom,:],\\\n complex_crd[self.top_RL.L_first_atom + self.top.universe.numberOfAtoms():,:]))\n elif self.args.FNs['inpcrd']['L'] is not None:\n self.data['CD'].confs['receptor'] = None\n if os.path.isfile(self.args.FNs['inpcrd']['L']):\n lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)\n else:\n lig_crd = None\n\n if lig_crd is not None:\n self.data['CD'].confs['ligand'] = lig_crd[self.top.\n inv_prmtop_atom_order_L, :]\n self.top.universe.setConfiguration(\\\n Configuration(self.top.universe,self.data['CD'].confs['ligand']))\n if self.top_RL.universe is not None:\n self.top_RL.universe.setConfiguration(\\\n Configuration(self.top_RL.universe, \\\n np.vstack((self.data['CD'].confs['receptor'],self.data['CD'].confs['ligand']))))\n\n if self.args.params['CD']['rmsd'] is not False:\n if self.args.params['CD']['rmsd'] is True:\n if lig_crd is not None:\n rmsd_crd = lig_crd[self.top.inv_prmtop_atom_order_L, :]\n else:\n raise Exception('Reference structure for rmsd calculations unknown')\n else:\n rmsd_crd = IO_crd.read(self.args.params['CD']['rmsd'], \\\n natoms=self.top.universe.numberOfAtoms(), multiplier=0.1)\n rmsd_crd = rmsd_crd[self.top.inv_prmtop_atom_order_L, :]\n self.data['CD'].confs['rmsd'] = rmsd_crd\n\n self.get_rmsds.set_ref_configuration(self.data['CD'].confs['rmsd'])\n\n # If configurations are being rescored, start with a docked structure\n (confs, Es) = self._get_confs_to_rescore(site=False, minimize=False)\n if len(confs) > 0:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, confs[-1]))\n\n from AlGDock.simulation_iterator import SimulationIterator\n self.iterator = SimulationIterator(self.args, self.top, self.system)\n\n # Load progress\n from AlGDock.postprocessing import Postprocessing\n Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run(readOnly=True)\n\n self.calc_f_L(readOnly=True)\n self.calc_f_RL(readOnly=True)\n\n if self.args.random_seed > 0:\n np.random.seed(self.args.random_seed)", "def init(self, msg_in = None, client = None):\r\n self.name = 'Oasis_DL'\r\n\r\n self.circular_buffers[b'act_temperature'] = CBServer(size = (2,4320000), var_type = 'float64')\r\n self.circular_buffers[b'cmd_temperature'] = CBServer(size = (2,4320000), var_type = 'float64')\r\n self.circular_buffers[b'fault'] = 
CBServer(size = (2,10000), var_type = 'float64')\r\n\r\n self.description = ''\r\n\r\n self.task_dictionary[0] = {b'function':driver.get_actual_temperature,b'name':b'act_temperature'}\r\n self.task_dictionary[1] = {b'function':driver.set_target_temperature,b'name':b'cmd_temperature'}\r\n self.task_dictionary[2] = {b'function':driver.get_faults,b'name':b'fault'}\r\n \r\n\r\n\r\n self.task_dictionary[10] = {b'function':driver.set_lower_limit,b'name':b'set_lower_limit'}\r\n self.task_dictionary[11] = {b'function':driver.get_lower_limit,b'name':b'get_lower_limit'}\r\n self.task_dictionary[12] = {b'function':driver.set_upper_limit,b'name':b'set_upper_limit'}\r\n self.task_dictionary[13] = {b'function':driver.get_upper_limit,b'name':b'get_upper_limit'}\r\n\r\n flag = False\r\n message = None\r\n err = ''\r\n flag, message, err = driver.init(), '', ''\r\n if flag:\r\n self.lower_limit = driver.device_dict[b'lower_limit']\r\n self.upper_limit = driver.device_dict[b'upper_limit']\r\n\r\n response = {}\r\n response[b'flag'] = flag\r\n response[b'message'] = message\r\n response[b'error'] = err\r\n return response", "def initialize(self,*args,**kwargs):\n self.__instrumentID = c_uint32(0) \n self.__numInstruments = c_uint32()\n self.__nbrOfChannels = c_uint32()\n self.__nbrADCBits = c_uint32()\n self.__temperature = c_int32()\n self.__time_us = c_double()\n\n self.loadDLLs(**kwargs) # Load the different DLLs or DLL based modules\n self.reinit() # init or reinit the board\n self.createDictAndGlobals() # create dictionaries and global variables\n self.nbrOfChannels=int(self.__nbrOfChannels.value) # duplicate self.nbrOfChannels in a Python type variable \n self.getInitialConfig()", "def __init__(self, param_card=None):\n \n # Param card accessor\n slha = ParamCard(param_card)\n \n self.ZERO = 0.\n\n # Computing independent parameters", "def __init__(self, controller):\n self._controller = controller", "def __init__(self):\n self.ctrl = src.robot_controller.RobotController()\n self.recorder = robot_recorder.RobotRecorder(save_dir=\"/home/guser/sawyer_data/test_recording\", start_loop=False)\n\n # drive to neutral position:\n self.ctrl.set_neutral()\n # import pdb; pdb.set_trace()\n\n self.num_traj = 10\n\n\n limb = 'right'\n self.name_of_service = \"ExternalTools/\" + limb + \"/PositionKinematicsNode/FKService\"\n self.fksvc = rospy.ServiceProxy(self.name_of_service, SolvePositionFK)\n\n self.run_data_collection()", "def init(self):\n self.picomotor = picomotorCommunication.Picomotor(picomotorCommunication.TCP_IP_PICOMOTOR_PLUG)\n connectionResult = self.picomotor.connect()\n self.originalPositionH = self.picomotor.askPosition(self.horizontalAxis)\n self.originalPositionV = self.picomotor.askPosition(self.verticalAxis)\n if connectionResult:\n self.initialised=True\n return \"%s init successful. (original position = (%s,%s))\" % (self.hardwareActionName, self.originalPositionH, self.originalPositionV)\n else:\n return \"%s init failed. Most likely a socket error. 
see above\" % self.hardwareActionName", "def __init__(self, kp, ki, kd, tolerance,\n saturation=None, max_integral=None, integral_fade=1.0):\n super().__init__()\n self.tolerance = tolerance\n self.controller = pidController(kp, ki, kd, saturation=saturation,\n max_integral=max_integral, integral_fade_rate=integral_fade)", "def autonomousInit(self):\n fieldState = self.driverStation.getGameSpecificMessage()\n self.fieldState = fieldState\n self.smartDashboard.putString(\"field state\", fieldState)\n fieldPosition = self.smartDashboard.getString(\"field position\", \"\")\n self.startingFieldPosition = self.parserobotFieldPosition(fieldPosition)\n self.smartDashboard.putNumber(\"position\", self.startingFieldPosition)\n \n #convert field states to our enum values \n self.ourSwitchSide = self.parserobotFieldPosition(self.fieldState[0])\n self.scaleSide = self.parserobotFieldPosition(self.fieldState[1])\n self.theirSwitchSide = self.parserobotFieldPosition(self.fieldState[2])\n if self.startingFieldPosition==self.kNothing:\n print(\"No field position set. Aborting\")\n return \n \n \n #self.Encoder.setMaxPeriod(.1)\n #self.Encoder.setMinRate(10)\n #self.Encoder.setDistancePerPulse(5)\n #self.Encoder.setReverseDirection(True)\n #self.Encoder.getDistance()\n \n \"\"\"self.Encoder.reset()\n while (self.Encoder.get() < value):\n drive\n delay\"\"\"\n \n \n \n \n \n \n \n #self.Encoder.getRawAxis()\n \n \n #todo change RRR to from fms, maybe parse it first\n \n self.autonomousProgram = commands.autonomousCommand.AutonomousProgram(self.startingFieldPosition)\n self.autonomousProgram.start()", "def init_host(self, **kwargs):\n LOG.info(_LI(\"Starting controller service\"))\n self._init_volumes(self.admin_context)\n self._init_backups(self.admin_context)\n self._init_replicates(self.admin_context)\n self._init_snapshots(self.admin_context)", "def __init__(self, options, positionals):\n\n print \"* Starting up LOPHI Master Process\"\n\n self.COMMANDS = {G.CTRL_CMD_START: self.command_start,\n G.CTRL_CMD_LIST: self.command_list,\n G.CTRL_CMD_PAUSE: self.command_abstract,\n G.CTRL_CMD_UNPAUSE: self.command_abstract,\n G.CTRL_CMD_SPLASH: self.command_splash,\n G.CTRL_CMD_UPDATE_HW: self.command_update_hw,\n G.CTRL_CMD_STOP: self.command_abstract,\n G.CTRL_CMD_DIE: self.command_abstract,\n G.CTRL_CMD_ATTACH: self.command_abstract,\n G.CTRL_CMD_EXECUTE: self.command_abstract}\n\n self.MSG_TYPES = set([G.CTRL_TYPE, G.REG_TYPE])\n\n # response header\n self.RESP_HEADER = \"[LOPHI Master] \"\n\n logger.debug(\"Importing config files...\")\n\n # Save our config file\n self.master_config_file = options.config_file\n\n # Save our config file\n self.analysis_directory = options.analysis_directory\n\n # Read our config into an internal structure \n self.config_list = Configs.import_from_config(self.master_config_file,\n \"controller\")\n\n # Read our analysis scripts into an internal structure\n self.update_analysis()\n\n # Connect to our database\n self.DB_analysis = DB.DatastoreAnalysis(options.services_host)\n\n # Set our RabbitMQ host\n self.amqp_host = options.services_host", "def init_motors(self):\n # self.maxVelocity = 576# -> 5 m/s\n # self.maxTorque = 30\n\n # motor init\n for m in self.motors:\n m.setPosition(float('inf'))\n m.setVelocity(1.)\n\n # Propeller PID control params tunned with Ziegler–Nichols PID\n K_u = 150.\n T_u = 342.857 / 1000. # ms\n # no overshoot\n params_roll = {'P': K_u / 5., 'I': (2. / 5.) 
* K_u / T_u,\n 'D': K_u * T_u / 15., 'sp': 0.}\n self.rollPID = PID(params_roll['P'], params_roll['I'],\n params_roll['D'], setpoint=params_roll['sp'],\n output_limits=(-2., 2.), sample_time=self.deltaT)\n\n K_u = 150.\n T_u = 682.66 / 1000. # ms\n # no overshoot\n params_pitch = {'P': K_u/5.,\n 'I': (2. / 5.) * K_u / T_u,\n 'D': K_u*T_u/15.,\n 'sp': 0.}\n self.pitchPID = PID(params_pitch['P'], params_pitch['I'],\n params_pitch['D'], setpoint=params_pitch['sp'],\n output_limits=(-2., 2.), sample_time=self.deltaT)\n K_u = 20.\n T_u = 1621.33 / 1000. # ms\n # PD\n params_yaw = {'P': 0.8 * K_u,\n 'I': 0.,\n 'D': K_u * T_u / 10.,\n 'sp': self.target_yaw}\n self.yawPID = PID(params_yaw['P'], params_yaw['I'], params_yaw['D'],\n setpoint=params_yaw['sp'], output_limits=(-2., 2.),\n sample_time=self.deltaT, error_map=pi_clip)\n\n K_u = 20.\n T_u = 2668.8 / 1000. # ms\n # PD\n params_vert = {'P': 0.8 * K_u,\n 'I': 0.,\n 'D': K_u * T_u / 10.,\n 'sp': self.target_altitude}\n self.vertPID = PID(params_vert['P'], params_vert['I'],\n params_vert['D'], setpoint=params_vert['sp'],\n output_limits=(-5., 5.), sample_time=self.deltaT)\n\n return True", "def __init__(self):\r\n\r\n self.Helpers = Helpers(\"Movidius\")\r\n self.confs = self.Helpers.confs\r\n\r\n self.classes = []\r\n self.ncsGraph = None\r\n self.ncsDevice = None\r\n self.reqsize = None\r\n\r\n self.mean = 128\r\n self.std = 1 / 128\r\n\r\n #mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)\r\n\r\n self.Helpers.logger.info(\"Movidius class initialization complete.\")", "def _initialize(self):\r\n print(\"Set the CP mode to EVSE\")\r\n self.whitebeet.controlPilotSetMode(1)\r\n print(\"Set the CP duty cycle to 100%\")\r\n self.whitebeet.controlPilotSetDutyCycle(100)\r\n print(\"Start the CP service\")\r\n self.whitebeet.controlPilotStart()\r\n print(\"Start SLAC in EVSE mode\")\r\n self.whitebeet.slacStart(1)\r\n time.sleep(2)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(SetElectromotorsControlRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.ctrl is None:\n self.ctrl = actuators_canopen.msg.ElectromotorsState()\n else:\n self.ctrl = actuators_canopen.msg.ElectromotorsState()", "def initialize_scene(self):\n if Time.now() - self.initial_time > 0.45 and self.should_initialize:\n self.should_initialize = False\n self.background_particle_controller = BackgroundParticlesController()\n self.player_controller = PlayerController()\n self.obstacle_controller_wrapper = ObstacleControllerWrapper()\n self.items_controller = ItemsControllerWrapper()\n self.score_controller = ScoreController()", "def __init__(self, name, rate):\n super(ControlNode, self).__init__(name, rate)\n self.mutex = RLock()\n self.controller = InverseDynamicController()\n self.ready = False\n\n # Physical quantities from sensors\n self.eta2 = np.zeros((3, 1))\n self.ni = np.zeros((6, 1))\n\n # References from topic\n self.speed_ref = np.zeros((3, 1))\n self.eta1_ref_body = Quantity(np.zeros((3, 1)))\n self.eta2_ref = Quantity(np.zeros((3, 1)))\n self.ni_ref = Quantity(np.zeros((6, 1)))\n\n # Error value\n self.ni_tilde = np.zeros((6, 1))\n\n # flags to wait first cycle\n self.reference_flags = {'ll': False, 'rpy': False, 'depth': False}\n\n # ROS\n rospy.init_node(self.node_name, anonymous=False)\n self.node_loop = rospy.Rate(self.node_rate)\n self.StartSubscriptions()\n self.pub_tau = rospy.Publisher('/control/tau', Tau, queue_size=1)\n self.pub_measurement = 
rospy.Publisher('/measurement', Measurement, queue_size=1)", "def __init__(self):\n self.output = []\n self.ctl = clingo.Control() # Control object for the grounding/solving process", "def _initComponent(self):\n\n self.optimizer = self._initOptimizer()\n self.scheduler = self._initScheduler()", "def autonomousInit(self):\n self.globalInit()\n self.autonomous.start()", "def _InitializeBase(self):\n imuen = rospy.get_param(\"~imuenable\", \"True\")\n if imuen:\n message = 'Startimu\\r'\n else:\n message = 'Startnoimu\\r'\n\n rospy.loginfo(\"Initializing Base \" + message)\n self._WriteSerial(message)\n \n lincorrection = rospy.get_param(\"~linear_correction\", 1.0)\n angcorrection = rospy.get_param(\"~angular_correction\", 0.984)\n message = 'ascale %d %d\\r' % self._GetBaseAndExponent(angcorrection)\n rospy.loginfo(\"Sending correction value: \" + message)\n self._WriteSerial(message)\n message = 'lscale %d %d\\r' % self._GetBaseAndExponent(lincorrection)\n rospy.loginfo(\"Sending correction value: \" + message)\n self._WriteSerial(message)", "async def initialize(self, hw_init=False, init_speed: str = \"200 sec / stroke\"):\n await self.pump_io.initialize()\n # Test connectivity by querying the pump's firmware version\n fw_cmd = Protocol1Command(command=\"U\", target_pump_num=self.address)\n self.metadata.version = await self.pump_io.write_and_read_reply_async(fw_cmd)\n logger.info(\n f\"Connected to Hamilton ML600 {self.name} - FW version: {self.metadata.version}!\"\n )\n\n if hw_init:\n await self.initialize_pump(speed=ureg.Quantity(init_speed))", "async def _hw_init(self):\n await self._write_async(b\":XR\\r\") # Broadcast: initialize + execute\n # Note: no need to consume reply here because there is none (since we are using broadcast)", "def setup_controller(cls, args, config):\n logging.debug(\"MOLNSController.setup_controller(config={0})\".format(config))\n # name\n if len(args) > 0:\n controller_name = args[0]\n else:\n print \"Usage: molns.py controller setup NAME\"\n return\n try:\n controller_obj = config.get_object(args[0], kind='Controller')\n except DatastoreException as e:\n # provider\n providers = config.list_objects(kind='Provider')\n if len(providers) == 0:\n print \"No providers configured, \" \\\n \"please configure one ('molns provider setup') before initializing controller.\"\n return\n print \"Select a provider:\"\n for n, p in enumerate(providers):\n print \"\\t[{0}] {1}\".format(n, p.name)\n provider_ndx = int(raw_input_default(\"Enter the number of provider:\", default='0'))\n provider_id = providers[provider_ndx].id\n provider_obj = config.get_object(name=providers[provider_ndx].name, kind='Provider')\n logging.debug(\"using provider {0}\".format(provider_obj))\n # create object\n try:\n controller_obj = config.create_object(ptype=provider_obj.type, name=controller_name, kind='Controller',\n provider_id=provider_id)\n except DatastoreException as e:\n print e\n return\n setup_object(controller_obj)\n config.save_object(controller_obj, kind='Controller')", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config", "def test_initialization(self, create_controller: Controller) -> None:\n pass", "def __init__(self):\n \n # Publishers\n self.pub_vel_prop = rospy.Publisher('/aer1217_ardrone/vel_prop', \n MotorCommands, queue_size=300)\n \n self.model_name = 'ARDroneCarre'\n \n self.pub_vicon_data = rospy.Publisher('/vicon/{0}/{0}'.format(\n self.model_name),\n TransformStamped, 
queue_size=30)\n\n \n # Subscribers\n self.sub_gazebo_pose = rospy.Subscriber('/aer1217_ardrone/gazebo_state', \n GazeboState,\n self.update_quadrotor_state)\n \n self.sub_cmd_vel = rospy.Subscriber('cmd_vel_RHC', \n Twist,\n self.update_offboard_command)\n \n \n # Initialize messages for publishing\n self.vel_prop_msg = MotorCommands()\n self.quadrotor_state = TransformStamped()\n \n # Run the onboard controller at 200 Hz\n self.onboard_loop_frequency = 200.\n \n # Create an onboard controller for calculation of the motor commands\n self.onboard_controller = ARDroneOnboardController()\n \n # Run this ROS node at the onboard loop frequency\n self.pub_prop_vel = rospy.Timer(rospy.Duration(1. / \n self.onboard_loop_frequency), self.update_motor_speeds)\n \n # Keep time for differentiation and integration within the controller\n self.old_time = rospy.get_time()", "def _initialize_hardware(self):\n # Import\n try:\n from gpiozero import MCP3008\n except Exception as ex:\n logging.error('\\n *** ERROR importing gpiozero: {}'.format(ex))\n\n # Things failed, must be running locally, not on a widget, so don't\n # bother initializing the MCP3008\n return\n\n # Initialize the MCP3008\n try:\n self._sensor = MCP3008(channel=0)\n except Exception as ex:\n logging.error('\\n *** ERROR initializing MCP3008: {}'.format(ex))\n return\n\n # Start force loop thread\n threading.Thread(target=self._force_loop, daemon=True).start()", "def teleopInit(self):\n self.Drive.resetEncoder()\n\n self.Drive.disableAutoForward()\n self.Drive.disableAutoTurn()\n self.Drive.disableVision()\n\n self.DS.setWhichVariable(True)\n self.Drive.updateSetpoint(\"teleop\")\n self.DS.setFirstTimeVariable(True)\n self.timer.reset()\n\n self.matchTime.startMode(isAuto=False)", "def __init__(self, para, ini_cond):\n\n # grid\n self.z = np.linspace(0, para['grid']['zmax'], para['grid']['Nlayers']) # grid [m] above ground\n self.dz = self.z[1] - self.z[0] # gridsize [m]\n self.ones = np.ones(len(self.z)) # dummy\n self.zref = para['zref'] # height of forcing data [m]\n \n # moss properties\n self.hc = para['hc'] # canopy height (m)\n self.lad = para['lad'] # shoot-area density (m2m-3)\n self.LAI = sum(self.lad*self.dz)\n \n self.canopy_nodes = np.where(self.lad > 0)[0]\n \n # hydraulic\n self.porosity = para['hydraulic']['porosity']\n self.pF = para['hydraulic']['pF']\n self.Ksat = para['hydraulic']['Ksat']\n self.freezing_curve = para['hydraulic']['freezing_curve']\n \n # radiation\n self.albedo = para['radiation'] # 'PAR', 'NIR'\n self.emissivity = para['radiation']['emissivity']\n self.clump = para['radiation']['clumping']\n self.leaf_angle = para['radiation']['leaf_angle']\n \n #self.radiation = para['radiation']\n \n # compute non-dimensional flow velocity Un = U/ust and momentum diffusivity\n Utop = ini_cond['Utop'] # U/ust at zref\n Ubot = 0.0 # no-slip\n self.Sc = para['Schmidt_nr']\n _, self.Un, self.Kmn, _ = closure_model_U_moss(self.z, self.lad, self.hc, Utop, Ubot) \n \n self.U = None\n self.Ks = None\n self.length_scale = para['length_scale']\n \n self.Switch_WMA = False\n \n # initial states\n self.T = ini_cond['T']\n self.Wtot = ini_cond['Wtot']\n self.Wliq, self.Wice, _ = frozen_water(self.T, self.Wot, fp=self.freezing_curve, To=0.0)\n self.h = water_retention(self.pF, theta=self.Wliq)", "def __init__(self):\n\n self._mh = MasterHead.get_head()", "def __init__(self, meta, api):\n _LOGGER.debug(\"Initializing device: %s\", meta['name'])\n self._id = meta['id']\n self._name = meta['name']\n \"\"\"Husqvarna API stoppped 
returning model number, HA prior to 2021.9 defaulted to none.\"\"\"\n self._model = None\n self._state = None\n self._mower_status = None\n self._stored_timestamp = None\n self._see = None\n\n # clone already authenticated api client and\n # select automower for this instance\n self._api = copy.copy(api)\n self._api.select_robot(self._id)", "def do_setup_pocs(self, *arg):\n args, kwargs = string_to_params(*arg)\n\n simulator = kwargs.get('simulator', list())\n if isinstance(simulator, str):\n simulator = [simulator]\n\n # TODO(wtgee) Incorporate real power readings\n if 'power' not in simulator:\n simulator.append('power')\n\n if 'POCSTIME' in os.environ:\n print_warning(\"Clearing POCSTIME variable\")\n del os.environ['POCSTIME']\n\n try:\n cameras = create_cameras_from_config(simulator=simulator)\n observatory = Observatory(simulator=simulator, cameras=cameras)\n self.pocs = POCS(observatory, messaging=True)\n self.pocs.initialize()\n except error.PanError as e:\n print_warning('Problem setting up POCS: {}'.format(e))", "def __init__(self, kp, ki, kd, ts):\n self.__kp = kp # Controller's P constant\n self.__kd = kd / ts # Controller's D constant\n self.__ki = ki * ts # Controller's I constant\n self.__ts = ts # Controller's sampling time\n self.__err_previous = None # Controller's previous error (there is no error before t = 0s)\n self.__error_sum = 0 # Controller's cumulative error", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "def __init__(__self__, *,\n controller_status: Optional['outputs.CSIPowerMaxStatusControllerStatus'] = None,\n driver_hash: Optional[int] = None,\n last_update: Optional['outputs.CSIPowerMaxStatusLastUpdate'] = None,\n node_status: Optional['outputs.CSIPowerMaxStatusNodeStatus'] = None,\n state: Optional[str] = None):\n if controller_status is not None:\n pulumi.set(__self__, \"controller_status\", controller_status)\n if driver_hash is not None:\n pulumi.set(__self__, \"driver_hash\", driver_hash)\n if last_update is not None:\n pulumi.set(__self__, \"last_update\", last_update)\n if node_status is not None:\n pulumi.set(__self__, \"node_status\", node_status)\n if state is not None:\n pulumi.set(__self__, \"state\", state)", "def __init__(self, encut, magmom, ldaul, Uparam, Jparam, name=\"DFTCL_settings\"):\n\n cl_settings = {\"ISPIN\": 2, \"MAGMOM\": magmom, \"SAXIS\": None, \"LSORBIT\": None, \"LNONCOLLINEAR\": None}\n dftu_settings = {\"LDAU\": \".TRUE.\", \"LDAUU\": Uparam, \"LDATYPE\": 2, \"LDAUL\": ldaul, \"LDAUJ\": Jparam , \"LMAXMIMX\": 4}\n InputParameters.__init__(self, name=name, magnetic_settings=cl_settings, hubbard_settings=dftu_settings)\n self.update_electronic_settings(\"encut\", encut)", "def initialize(self) -> None:\n # Set motors to stop, read encoder values for starting point\n self.drive.arcadeDrive(0, 0)\n self.drive.resetEncoders()", "def __init__(self, pidevice, **kwargs):\n debug('create an instance of ControllerStartup(kwargs=%s)', itemstostr(kwargs))\n\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' 
% type(pidevice).__name__)\n\n self.pidevice = pidevice\n self._stages = None\n self._refmodes = None\n self._servo = None\n self._axesnames = None\n self._kwargs = kwargs\n self._databuf = {'servobuf': {}, 'cstdone': []}\n self.prop = {\n 'devname': self.pidevice.devname, 'skipcst': False, 'forcecst': False, 'skipsai': False,\n 'forcesai': False, 'showlog': False, 'skipini': False, 'skiponl': False, 'skipeax': False,\n 'skipref': False, 'forceref': False, 'skipfph': False,\n }", "def _setup(self) -> None:\n # Call base implementation\n super()._setup()\n\n # Configure the low-level integrator\n engine_options = self.simulator.engine.get_options()\n engine_options[\"stepper\"][\"iterMax\"] = 0\n engine_options[\"stepper\"][\"dtMax\"] = min(0.02, self.step_dt)\n engine_options[\"stepper\"][\"logInternalStepperSteps\"] = False\n\n # Set maximum computation time for single internal integration steps\n if self.debug:\n engine_options[\"stepper\"][\"timeout\"] = 0.0\n else:\n engine_options[\"stepper\"][\"timeout\"] = 2.0\n\n # Enable logging of geometries in debug mode\n if self.debug:\n engine_options[\"telemetry\"][\"isPersistent\"] = True\n\n # Update engine options\n self.simulator.engine.set_options(engine_options)\n\n # Set robot in neutral configuration\n qpos = self._neutral()\n framesForwardKinematics(\n self.robot.pinocchio_model, self.robot.pinocchio_data, qpos)", "def setup_one_time_controllers(self):\n self.index_controller = IndexController(self)\n self.opds_feeds = OPDSFeedController(self)\n self.marc_records = MARCRecordController(self)\n self.loans = LoanController(self)\n self.annotations = AnnotationController(self)\n self.urn_lookup = URNLookupController(self)\n self.work_controller = WorkController(self)\n self.analytics_controller = AnalyticsController(self)\n self.profiles = ProfileController(self)\n self.heartbeat = HeartbeatController()\n self.odl_notification_controller = ODLNotificationController(self)\n self.shared_collection_controller = SharedCollectionController(self)\n self.static_files = StaticFileController(self)\n self.rbdproxy = RBDFulfillmentProxyController(self)\n\n from api.lcp.controller import LCPController\n self.lcp_controller = LCPController(self)", "def initialize(self):\n\t\tpcd8544.LCD.initialize(self)\n\t\tRPIO.setup(self._backlight_pin, RPIO.OUT, initial=RPIO.LOW)", "def initialize(self):\n self.lib.Initialize()\n\n self.triggers = {'Internal': 0, 'External': 1, 'External Start': 6,\n 'External Exposure': 7, 'External FVB EM': 9,\n 'Software Trigger': 10,\n 'External Charge Shifting': 12}\n self.savetypes = {'Signed16bits': 1, 'Signed32bits': 2, 'Float': 3}\n\n # Initial values\n\n self.readout_packing_state = False\n self.readout_packing = self.readout_packing_state\n\n self.readout_mode_mode = 'Image'\n self.readout_mode = self.readout_mode_mode\n\n self.photon_counting_mode_state = False\n self.photon_counting_mode = self.photon_counting_mode_state\n\n self.frame_transfer_mode_state = False\n self.frame_transfer_mode = self.frame_transfer_mode_state\n\n self.fan_mode_index = 'onfull'\n self.fan_mode = self.fan_mode_index\n\n self.EM_gain_mode_index = 'RealGain'\n self.EM_gain_mode = self.EM_gain_mode_index\n\n self.cooled_on_shutdown_value = False\n self.cooled_on_shutdown = self.cooled_on_shutdown_value\n\n self.baseline_offset_value = 100\n self.baseline_offset = self.baseline_offset_value\n\n self.adv_trigger_mode_state = True\n self.adv_trigger_mode = self.adv_trigger_mode_state\n\n self.acq_mode = 'Single Scan'\n self.acquisition_mode = 
self.acq_mode\n\n self.amp_typ = 0\n\n self.horiz_shift_speed_index = 0\n self.horiz_shift_speed = self.horiz_shift_speed_index\n\n self.vert_shift_speed_index = 0\n self.vert_shift_speed = self.vert_shift_speed_index\n\n self.preamp_index = 0\n self.preamp = self.preamp_index\n\n self.temperature_sp = 0 * degC\n self.temperature_setpoint = self.temperature_sp\n\n self.auxout = np.zeros(4, dtype=bool)\n for i in np.arange(1, 5):\n self.out_aux_port[i] = False\n\n self.trigger_mode_index = 'Internal'\n self.trigger_mode = self.trigger_mode_index", "def initialize_core(self):\n # set current session unique id\n self.conf.set(\"accesspoint\", \"current_session\", self.currentSessionID)\n # set interface for shared connection from params\n self.conf.set(\"accesspoint\", \"interface_net\", self.parse_args.interface_net)\n \n if self.parse_args.interface:\n self.conf.set(\"accesspoint\", \"interface\", self.parse_args.interface)\n\n self.all_modules = module_list\n\n # intialize the LoggerManager\n # TODO: this change solve IndexError: list index out of range\n # but not a definitive solution\n self.logger_manager = LoggerManager(self)\n self.coreui = DefaultController(self)\n\n # print(self.coreui.Plugins)\n self.proxy_controller = self.coreui.getController(\"proxy_controller\")\n self.mitm_controller = self.coreui.getController(\"mitm_controller\")\n self.wireless_controller = self.coreui.getController(\"wireless_controller\")\n self.dhcp_controller = self.coreui.getController(\"dhcp_controller\")\n self.dns_controller = self.coreui.getController(\"dns_controller\")\n self.uiwid_controller = self.coreui.getController(\"ui_controller\")\n\n self.parser_list_func = {\n # parser_set_proxy is default extend class\n \"parser_set_proxy\": self.proxy_controller.pumpkinproxy,\n \"parser_set_plugin\": self.mitm_controller.sniffkin3,\n \"parser_set_mode\": self.wireless_controller.Settings,\n \"parser_set_security\": self.wireless_controller.Settings,\n \"parser_set_hostapd_config\": self.wireless_controller.Settings,\n \"parser_set_dhcpconf\": self.wireless_controller.Settings,\n \"parser_set_dhcpmode\": self.dns_controller.Active,\n }\n self.parser_autcomplete_func = {}\n\n # hook function (plugins and proxies)\n self.intialize_hook_func(self.proxy_controller)\n self.intialize_hook_func(self.mitm_controller)\n\n # register autocomplete set security command\n self.parser_autcomplete_func[\n \"parser_set_security\"\n ] = self.wireless_controller.Settings.getCommandsSecurity\n self.parser_autcomplete_func[\n \"parser_set_hostapd_config\"\n ] = self.wireless_controller.Settings.getCommandsHostapd\n self.parser_autcomplete_func[\n \"parser_set_dhcpconf\"\n ] = self.wireless_controller.Settings.getCommandsDhcpConf\n self.parser_autcomplete_func[\n \"parser_set_dhcpmode\"\n ] = self.dns_controller.getCommandsDhcpMode\n\n self.commands = {\n \"interface\": \"interface\",\n \"interface_net\": \"interface_net\",\n \"ssid\": \"ssid\",\n \"bssid\": \"bssid\",\n \"channel\": \"channel\",\n \"proxy\": None, # only for settings proxy\n \"plugin\": None, # only for settings plugin\n \"mode\": None, # only for settings mdoe\n \"dhcpconf\": None, # only for settings dhcpconf\n \"dhcpmode\": None, # only for settings dhcpmode\n \"security\": \"enable_security\",\n \"hostapd_config\": \"enable_hostapd_config\",\n }\n\n # get all command plugins and proxies\n for ctr_name, ctr_instance in self.coreui.getController(None).items():\n if hasattr(ctr_instance, \"getInfo\"):\n for plugin_name, plugins_info in 
ctr_instance.getInfo().items():\n self.commands[plugin_name] = \"\"\n\n self.threads = {\"RogueAP\": [], \"Modules\": {}}", "def __init__(self, rexarm):\n self.idle = True\n self.rexarm = rexarm\n self.initial_wp = None\n self.final_wp = None\n self.dt = 0.05 # command rate\n self.desired_speed = 0.75\n self.speed_multiplier = 1.0\n self.is_init = True\n self.is_final = False", "def __init__(self, root, io):\n parts.hand.Hand.__init__(self, root=root, io=io)\n\n dxl_motors = OrderedDict({\n name: dict(conf)\n for name, conf in self.dxl_motors.items()\n })\n\n self.attach_dxl_motors(dxl_motors)\n\n \"\"\"\n self._load_sensor = self.io.find_module('force_gripper')\n self._load_sensor.offset = 4\n self._load_sensor.scale = 10000\n \"\"\"", "def init(self):\n self.mrs = []\n if self.args.block_ctrl is not None:\n self.block_ctrl_list = self.args.block_ctrl.split(\",\")\n # Verify User input as per 0,1,2 format.\n if \"\" in self.block_ctrl_list:\n raise SALError(\"Invalid Input:[%s] to block controller \"\n \"arguments given to scripts!!!\"\n % (self.block_ctrl_list))\n else:\n self.block_ctrl_list = \"\"\n # Create MR instance for controller index using --ctrl input.\n self.mrs.append(create_mradapter(ctrl_index=self.args.ctrl))\n self.ctrl_cnt = self.mrs[0].cli.controller_count()\n for index in range(0, self.ctrl_cnt):\n # Check for Block controller list.\n if str(index) not in self.block_ctrl_list:\n # Check for --Ctrl index given as arg to script.\n if index != self.args.ctrl:\n self.log.info(\"Creating MR instance for Controller-%d\"\n % (index))\n self.mrs.append(create_mradapter(ctrl_index=index))\n else:\n self.log.info(\"*****TC will not execute on Blocked \"\n \"Controller-%d*****\" % (index))\n for mr in self.mrs:\n if not mr.is_mr():\n raise SALError(\"This script is applicable only for MR \"\n \"controller cards\")", "def __init__(self):\n config = self.read_config()\n self.deployment = config['deployment']\n self.deployment_config = config[self.deployment]\n logger.info(f'Initializing storage client with the {self.deployment} deployment config {pformat(self.deployment_config)}')\n\n # get the MLOS config from the user else default it from the deployment config file\n # self.mlos_config = config['MLOS']\n # logger.info(f'Initializing storage client with the MLOS config {pformat(self.mlos_config)}')\n\n # setup the mount path\n if self.deployment == \"LOCAL\":\n self.mount_dir = self.setup_mount()\n logger.info(f'Mount directory setup completed: {self.mount_dir}')", "def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()", "def __init__(self,n,\n d=3,\n maxn=125,\n controllers=[],\n xmax=XMAX,\n ymax=YMAX,\n zmax=ZMAX,\n vmax=VMAX,\n simbox=None,\n mass=1.0,\n rinit=None,\n source=None,\n side=(5,5,5),\n integrator='rk4',\n spacing=0.1):\n self.n = n\n self.dim = d\n self.maxn = maxn\n self.dt = 0.0\n self.steps = 0\n\n # Basic mechanical properties\n if not simbox:\n self.box = box.MirrorBox(p=self,xmax=xmax,ymax=ymax,zmax=zmax)\n else:\n self.box = simbox\n \n # Select integrator\n integrator_mapping = {'euler':euler,\n 'ieuler':imp_euler,\n 'rk4':rk4}\n\n self.step = integrator_mapping[integrator]\n\n # Start with a random configuration\n self.r = self.box.xmax * np.random.random([self.maxn,self.dim])\n self.m = np.zeros(self.maxn,dtype=float)\n self.v = vmax * (np.random.random([self.maxn,self.dim]) - 0.5)\n self.rdot = np.zeros(self.r.shape)\n self.vdot = np.zeros(self.v.shape)\n 
self.mdot = np.zeros(self.m.shape)\n\n if rinit == 'grid':\n self.r[0:n,:] = configuration.grid3d(n,side,(xmax/2.,ymax/2.,zmax/2.)\n ,spacing=spacing)\n elif rinit == 'fcc':\n self.r[0:n,:] = configuration.fcc3d(n,side,(xmax/2.,ymax/2.,zmax/2.)\n ,spacing=spacing)\n elif rinit == 'load':\n # Load the configuration from a target.\n # Today the target is hard-coded\n source = os.environ.get('SPDATA') + '/' + source #nanobox_eq_2.nc'\n read_step(source,self,step='last')\n # Make some assumptions about the source file.\n\n # Initialise values\n #self.r[0:self.n]=configuration.grid3d(self.n,5,5,(20,20,20),spacing=0.8)\n self.m[:] = mass \n self.colour = 1.0,0.0,0.0 \n\n # State vectors to pass to numerical integrators\n n_variables = 7\n self.x = np.zeros([n_variables,self.maxn])\n self.xdot = np.zeros([n_variables,self.maxn])\n\n self.nlists = []\n self.forces = []\n\n self.controllers = controllers\n for controller in self.controllers:\n controller.bind_particles(self)\n\n \"\"\" Variables for measuring performance. \"\"\"\n self.timing = {}\n self.timing['force time'] = -1\n self.timing['deriv time'] = -1\n self.timing['pairsep time'] = -1\n self.timing['update time'] = -1\n self.timing['integrate time'] = -1", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(MPC_ACCRequest, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.u0 is None:\n self.u0 = 0.\n if self.h0 is None:\n self.h0 = 0.\n if self.vl is None:\n self.vl = 0.\n if self.i0 is None:\n self.i0 = 0.\n if self.wv is None:\n self.wv = 0.\n if self.wh is None:\n self.wh = 0.\n if self.wi is None:\n self.wi = 0.\n if self.h_stop is None:\n self.h_stop = 0.\n if self.T_gap is None:\n self.T_gap = 0.\n if self.v_max is None:\n self.v_max = 0.\n if self.v_min is None:\n self.v_min = 0.\n if self.h_min is None:\n self.h_min = 0.\n if self.i_max is None:\n self.i_max = 0.\n if self.i_min is None:\n self.i_min = 0.\n else:\n self.u0 = 0.\n self.h0 = 0.\n self.vl = 0.\n self.i0 = 0.\n self.wv = 0.\n self.wh = 0.\n self.wi = 0.\n self.h_stop = 0.\n self.T_gap = 0.\n self.v_max = 0.\n self.v_min = 0.\n self.h_min = 0.\n self.i_max = 0.\n self.i_min = 0.", "def init_lens(self):\n\n response = self.send_lens_cmd(['00'], fast_mode=False)\n response = self.send_lens_cmd(['0A', '00'], fast_mode=False)\n\n if response['MISO'][1] != 'AA':\n print(response['return_str'])\n raise RuntimeError('Lens initialisation failed')\n\n response = self.send_lens_cmd(['0A', '00'], fast_mode=True)\n\n cmd = ['80', '0A']\n for n in range(10):\n cmd.append('00')\n\n response = self.send_lens_cmd(cmd, fast_mode=True)\n\n self._min_FL = int('0x' + response['MISO'][4], 16)\n self._max_FL = int('0x' + response['MISO'][6], 16)\n\n if self.min_FL == self.max_FL:\n self.lens_desc = '{} mm prime lens'.format(self.min_FL)\n else:\n self.lens_desc = '{}-{} mm tele lens'.format(self.min_FL, self.max_FL)\n\n print('initialised {}'.format(self.lens_desc))", "def __init__(self):\n\n self.controller = None\n\n self.game_running = False\n self.menu_view_running = False\n self.end_game_running = False", "def init(self, parameters, agent_parameters):\n pass", "def __init__(self, upstream=None, downstream=None,\n name='', master = None, Kv = 0.0, verbose=0): \n global _pccount\n if name == '':\n name = 'PressureController_'+`_pccount`\n _pccount += 1\n FlowDevice.__init__(self,2,name,verbose)\n if upstream and downstream:\n self.install(upstream, downstream)\n self.setPressureCoeff(Kv)\n 
self.setMaster(master)", "def initialConfig(self):\r\r\n\r\r\n loggerCmw = logging.getLogger('initialConfig')\r\r\n\r\r\n self.set_scenario()\r\r\n\r\r\n self.set_default_rf_settings()\r\r\n\r\r\n self.physical_downlink_settings()\r\r\n\r\r\n self.physical_uplink_settings()\r\r\n\r\r\n self.connection_config()\r\r\n\r\r\n self.network_settings()\r\r\n\r\r\n self.set_conn_type(conn= self.connTypeEnum.CS)\r\r\n\r\r\n self.waitForCompletion()", "def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()", "def _make_acquisition_controllers(self, auto_init=True):\n for instance_name, _ in self.config_data[\"controllers\"].items():\n self._acquisition_controllers[\n instance_name\n ] = RemoteAcquisitionControl(\n instance_name=instance_name,\n config_data=self.config_data,\n control_socket_wrapper=self._control_socket,\n auto_init=auto_init,\n )", "def __init__(self,\n learning_rates=[0.1, 0.00025],\n state_sizes=[0, 0],\n constraints=None,\n num_constraints=0,\n num_primitive_actions=0,\n num_controllers=0,\n num_controllers_per_subtask=0,\n num_communication_turns=0,\n critic_fn=None,\n controller_subset_fn=None):\n self._meta_controller_state_size = state_sizes[0]\n\n self._num_controllers = num_controllers\n # Number of controllers that communicate to complete a subtask.\n self._num_controllers_per_subtask = num_controllers_per_subtask\n\n # A controller's state size is the input state size (the environment state)\n # + the ordering vector size (num_controllers_per_subtask)\n # + the communication vectors from the communication rounds and output round\n # (num_communication_turns * num_primitive_actions).\n self._controller_state_size = state_sizes[1] \n self._controller_state_size += self._num_controllers_per_subtask\n self._controller_state_size += num_communication_turns * num_primitive_actions\n\n self._meta_controller = DqnAgent(\n state_dims=state_sizes[0],\n num_actions=num_constraints,\n learning_rate=learning_rates[0],\n epsilon_end=0.01)\n\n self._controller = DqnAgent(\n learning_rate=learning_rates[1],\n num_actions=num_primitive_actions,\n state_dims=[self._controller_state_size],\n epsilon_end=0.01)\n\n self._constraints = constraints\n self._num_constraints = num_constraints\n self._num_primitive_actions = num_primitive_actions\n self._num_communication_turns = num_communication_turns\n self._critic_fn = critic_fn\n self._controller_subset_fn = controller_subset_fn\n\n self._intrinsic_time_step = 0\n self._episode = 0\n\n # Book-keeping variables.\n # Keeps track of the current meta-controller state.\n self._meta_controller_state = None\n # Keeps track of the current action selected by the meta-controller.\n self._curr_constraint = None\n # Keeps track of the meta-controller's reward for the current meta-controller time step.\n self._meta_controller_reward = 0\n\n # Keeps track of the constraints tried for current controller subset.\n self._tried_constraints = self.reset_tried_constraints()\n # Keeps track of controllers who have completed coordination in the current episode.\n self._done_controllers = []", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = 
TouchSensor(INPUT_4)", "def _default_setup(self):\n self._n_configs = 1\n self._sn_size = 100\n self._nt = 10000\n self._active_brdch = np.zeros(\n (), dtype=[(\"SIS 3302\", bool, (4, 8)), (\"SIS 3305\", bool, (2, 8))]\n )\n self._active_brdch[\"SIS 3302\"][0][0] = True\n self._active_brdch[\"SIS 3305\"][0][0] = True\n self._config_names = []\n self._active_config = (\"config01\",)\n self._sis3305_mode = 0", "def startup(self):\n # Initializing the cycle data (cd) dictionary\n self.cd[\"started_up\"] = False\n self.cd[\"peak_pressure\"] = 0\n self.cd[\"tidal_volume\"] = 0\n self.cd[\"inhale_duration\"] = 0\n self.cd[\"exhale_duration\"] = 0\n self.cd[\"IE_ratio\"] = 1\n self.cd[\"PEEP\"] = 0\n\n to = 2 # Timeout\n startup_cycles = 0\n limit = 20\n # If the piston position is unknown\n last_cycle = time.time()\n while not self.piston.piston_at_bottom and not self.piston.piston_at_top:\n if self.pst_dir == 1:\n self.piston.pst_up()\n if time.time() - last_cycle > to:\n self.pst_dir = 0\n startup_cycles += 1\n last_cycle = time.time()\n else:\n self.piston.pst_down()\n if time.time() - last_cycle > to:\n self.pst_dir = 1\n startup_cycles += 1\n last_cycle = time.time()\n if startup_cycles >= limit:\n print(\"There is a problem at startup, check compressed air\")\n print(f\"Tried to startup for {startup_cycles} cycles\")\n # Breaks the loop so that the controller doesn't start\n self.signal_startup_error.emit(True)\n return\n while not self.piston.piston_at_top:\n self.piston.pst_up()\n self.piston.stop()\n\n print(f\"startup_cycles: {startup_cycles}\")\n self.cd[\"started_up\"] = True\n self.signal_cycle_data.emit(self.cd)\n # Duration of the first tare of the system\n tare_duration = 5.0\n time.sleep(tare_duration)\n self.signal_get_tare.emit(tare_duration)\n # Waits a little bit just to make sure that the respirator isn't working when the controller \n # is called\n time.sleep(0.5)\n self.piston_control()", "def __init__(self, env):\n super(PlayerOneNetworkControllerWrapper, self).__init__(env)\n buttons = [\"B\", \"A\", \"MODE\", \"START\", \"UP\", \"DOWN\", \"LEFT\", \"RIGHT\", \"C\", \"Y\", \"X\", \"Z\"]\n actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'],['LEFT', 'UP'],['RIGHT', 'UP'],\n ['DOWN', 'B'],['LEFT', 'UP'],['RIGHT', 'DOWN','B'],['RIGHT', 'DOWN','A'],\n ['RIGHT', 'UP','B'],['RIGHT', 'UP','A'],['RIGHT', 'UP','C'],\n ['LEFT', 'UP','B'],['LEFT', 'UP','A'],['LEFT', 'UP','C'],\n ['C'],['START'], ['B'],['Y'],['X'],['Z'],['A'],['UP'],['MODE']]\n self._actions = []\n for action in actions:\n arr = np.array([False] * 12)\n for button in action:\n arr[buttons.index(button)] = True\n self._actions.append(arr)\n self.action_space = gym.spaces.Discrete(len(self._actions))", "def _setup_kinematics(self):\n self.kin = Kinematics(robot_name=self.robot_name,\n offset=self.offset,\n active_joint_names=self.get_actuated_joint_names(),\n base_name=\"\", \n eef_name=None,\n frames=self.root\n )\n self._init_transform()", "def setup_initial_state(self):\n # collect the ids of vehicles in the network\n self.ids = self.vehicles.get_ids()\n self.controlled_ids = self.vehicles.get_controlled_ids()\n self.sumo_ids = self.vehicles.get_sumo_ids()\n self.rl_ids = self.vehicles.get_rl_ids()\n\n # dictionary of initial observations used while resetting vehicles after\n # each rollout\n self.initial_observations = dict.fromkeys(self.ids)\n\n # create the list of colors used to different between different types of\n # vehicles visually on sumo's gui\n #TODO: Get these colors working!\n # 
self.colors = {(255,0,0), (0,255,0),(0,0,255),(255,255,255)}\n self.colors = {}\n key_index = 1\n color_choice = np.random.choice(len(COLORS))\n for i in range(self.vehicles.num_types):\n self.colors[self.vehicles.types[i]] = \\\n COLORS[(color_choice + key_index) % len(COLORS)]\n key_index += 1\n\n for veh_id in self.ids:\n # set the colors of the vehicles based on their unique types\n veh_type = self.vehicles.get_state(veh_id, \"type\")\n self.traci_connection.vehicle.setColor(veh_id,\n self.colors[veh_type])\n\n # add the initial states to the vehicles class\n self.vehicles.set_edge(\n veh_id, self.traci_connection.vehicle.getRoadID(veh_id))\n self.vehicles.set_position(\n veh_id, self.traci_connection.vehicle.getLanePosition(veh_id))\n self.vehicles.set_lane(\n veh_id, self.traci_connection.vehicle.getLaneIndex(veh_id))\n self.vehicles.set_speed(\n veh_id, self.traci_connection.vehicle.getSpeed(veh_id))\n self.vehicles.set_route(\n veh_id, self.available_routes[self.vehicles.get_edge(veh_id)])\n self.vehicles.set_absolute_position(\n veh_id, self.get_x_by_id(veh_id))\n # the time step of the last lane change is always present in\n # the environment,but only used by sub-classes that apply lane\n # changing\n self.vehicles.set_state(veh_id, \"last_lc\",\n -1 * self.lane_change_duration)\n # some constant vehicle parameters\n self.vehicles.set_state(\n veh_id, \"length\",\n self.traci_connection.vehicle.getLength(veh_id))\n self.vehicles.set_state(veh_id, \"max_speed\", self.max_speed)\n\n # import initial state data to initial_observations dict\n self.initial_observations[veh_id] = dict()\n self.initial_observations[veh_id][\"type\"] = veh_type\n self.initial_observations[veh_id][\"edge\"] = \\\n self.traci_connection.vehicle.getRoadID(veh_id)\n self.initial_observations[veh_id][\"position\"] = \\\n self.traci_connection.vehicle.getLanePosition(veh_id)\n self.initial_observations[veh_id][\"lane\"] = \\\n self.traci_connection.vehicle.getLaneIndex(veh_id)\n self.initial_observations[veh_id][\"speed\"] = \\\n self.traci_connection.vehicle.getSpeed(veh_id)\n self.initial_observations[veh_id][\"route\"] = \\\n self.available_routes[self.initial_observations[veh_id][\"edge\"]]\n self.initial_observations[veh_id][\"absolute_position\"] = \\\n self.get_x_by_id(veh_id)\n\n # set speed mode\n self.set_speed_mode(veh_id)\n\n # set lane change mode\n self.set_lane_change_mode(veh_id)\n\n # save the initial state. 
This is used in the _reset function\n #\n route_id = \"route\" + self.initial_observations[veh_id][\"edge\"]\n pos = self.traci_connection.vehicle.getPosition(veh_id)\n\n self.initial_state[veh_id] = \\\n (self.initial_observations[veh_id][\"type\"], route_id,\n self.initial_observations[veh_id][\"lane\"],\n self.initial_observations[veh_id][\"position\"],\n self.initial_observations[veh_id][\"speed\"], pos)\n\n # collect list of sorted vehicle ids\n self.sorted_ids, self.sorted_extra_data = self.sort_by_position()\n\n # collect headway, leader id, and follower id data\n for veh_id in self.ids:\n headway = self.traci_connection.vehicle.getLeader(veh_id, 2000)\n if headway is None:\n self.vehicles.set_leader(veh_id, None)\n self.vehicles.set_headway(veh_id, 9e9)\n else:\n self.vehicles.set_leader(veh_id, headway[0])\n self.vehicles.set_headway(veh_id, headway[1])\n self.vehicles.set_follower(headway[0], veh_id)\n\n # contains the last lc before the current step\n self.prev_last_lc = dict()\n for veh_id in self.ids:\n self.prev_last_lc[veh_id] = self.vehicles.get_state(veh_id,\n \"last_lc\")\n\n # subscribe the requested states for traci-related speedups\n for veh_id in self.ids:\n self.traci_connection.vehicle.subscribe(\n veh_id, [tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION,\n tc.VAR_ROAD_ID, tc.VAR_SPEED])\n self.traci_connection.vehicle.subscribeLeader(veh_id, 2000)", "def autonomousInit(self):\n '''\n self.cumulativeTime=0\n self.totalTime=0\n self.dataSet=[[-0.5,0,1,-1.0],[0.3,0.4,1,1.0],[-0.5,0,1,-1.0]]\n for i in self.dataSet:\n self.totalTime+=i[2]\n self.intervals = 0\n self.currentTime = 0\n for i in range(0,len(self.dataSet)):\n self.dataSet[i].append([self.currentTime,self.currentTime+self.dataSet[i][2]])\n self.currentTime+=self.dataSet[i][2]\n for i in self.dataSet:\n if i[3]==1.0:\n i.append(\"Forward\")\n if i[3]==-1.0:\n i.append(\"Backward\")\n \n self.timer.reset()\n self.timer.start()\n '''\n self.timer.reset()\n self.timer.start()\n\n #self.auto = self.chooser.getSelected()\n self.auto = 6\n self.autoState = 0\n #self.auto = 1\n\n self.EC1.reset()\n \n\n #self.auto = self.chooser.getSelected()", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()" ]
[ "0.6525128", "0.6433289", "0.63405937", "0.6292362", "0.6253625", "0.622259", "0.6191588", "0.6181524", "0.6158765", "0.61208683", "0.61065567", "0.6085143", "0.60849625", "0.60765624", "0.60523504", "0.6051844", "0.6037136", "0.60301024", "0.60288566", "0.6012272", "0.6000022", "0.59649456", "0.5962679", "0.5959399", "0.5936196", "0.5911271", "0.5903587", "0.58915323", "0.5879826", "0.58797616", "0.5879328", "0.58734477", "0.5872237", "0.5857456", "0.5850251", "0.58458245", "0.5837293", "0.5826828", "0.5820623", "0.5820584", "0.5819305", "0.5819055", "0.5814455", "0.57975274", "0.57869196", "0.57842654", "0.5783264", "0.57575935", "0.57488054", "0.5745988", "0.57434714", "0.5743337", "0.5738614", "0.5725746", "0.5721594", "0.5719271", "0.5709122", "0.5708437", "0.5696969", "0.56944025", "0.56916916", "0.5690859", "0.5689226", "0.5681987", "0.5679197", "0.56785", "0.5677627", "0.5673922", "0.56702477", "0.5665267", "0.5664565", "0.5663787", "0.56584495", "0.56536674", "0.5652308", "0.5644078", "0.5642655", "0.5641258", "0.56374395", "0.5629171", "0.5627245", "0.56233543", "0.561557", "0.56118006", "0.56081176", "0.5599533", "0.5594704", "0.5593179", "0.55855674", "0.5584906", "0.55791956", "0.55760854", "0.55751956", "0.5574787", "0.5574583", "0.557319", "0.5573039", "0.5570102", "0.5568554", "0.5566421", "0.5565978" ]
0.0
-1
velocity callback for the arm
def get_velocity(self, message):
 #print('**************** vel ')
 self.velocity = message.data
 self.state[0:self.ndegres] = self.velocity[0:self.ndegres]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rover_velocity_callback(self, msg):\n\t\t# print(\"Rover velocity callback message: {}\".format(msg))\n\t\tpass", "def cmd_vel_callback(self, msg):\n # Just store the desired velocity. The actual control runs on odometry callbacks\n v_l = msg.linear\n v_a = msg.angular\n self.v_linear_des = numpy.array([v_l.x, v_l.y, v_l.z])\n self.v_angular_des = numpy.array([v_a.x, v_a.y, v_a.z])", "def _cb_cmd_vel(self,msg):\r\n print \"Walker velocity command received: \",msg\r\n vx=msg.linear.x\r\n vy=msg.linear.y\r\n vt=msg.angular.z\r\n self.start()\r\n self.set_desired_velocity(vx,vy,vt)", "def vel(self, *args, **kwargs) -> Any:\n pass", "def cmdVelCallback(self, req):\n x = req.linear.x # m/s\n th = req.angular.z # rad/s\n\n if x == 0:\n # Turn in place\n right = th * self.wheel_track * self.gear_reduction / 2.0\n left = -right\n elif th == 0: \n # Pure forward/backward motion\n left = right = x\n else:\n # Rotation about a point in space\n left = x - th * self.wheel_track * self.gear_reduction / 2.0\n right = x + th * self.wheel_track * self.gear_reduction / 2.0\n\n # Set motor speeds in meters per second.\n self.mySerializer.mogo_m_per_s([1, 2], [left, right])", "def cmd_vel_callback(self, msg):\n with self._cmd_vel_lock:\n self._x_linear_cmd = msg.linear.x\n self._z_angular_cmd = msg.angular.z\n self._last_cmd_vel_time = rospy.get_rostime()", "def cmd_velocity(self, vn, ve, vd, heading):\n pass", "def target_velocity(self, time):\n pass", "def target_velocity(self, time):\n pass", "def _twist_callback(self, cmd):\n self.set_velocity(cmd.linear.x, cmd.angular.z)", "def _vel_callback(self, msg):\n self.joints_vels = msg.data\n self.compute_torques('vel')", "def new_velocity(self):\n self.velocity = self.vafter", "def base_velocity(self):\n raise NotImplementedError('Not yet implemented!')", "def getVelocity(self):\n return self.v", "def apply_velocity(self):\n self.position.data += self.velocity.data", "def cb_current_velocity(self,msg):\n # log message\n # rospy.logdebug('DBWNode::velocity_cb %s',msg)\n # store message\n self.current_twist = msg.twist\n self.current_velocity = msg.twist.linear.x", "def arm_calibration(self):\n # DONE: 3. Implement the arm calibration movement by fixing the code below (it has many bugs). It should to this:\n # Command the arm_motor to run forever in the positive direction at max speed.\n # Create an infinite while loop that will block code execution until the touch sensor's is_pressed value is True.\n # Within that loop sleep for 0.01 to avoid running code too fast.\n # Once past the loop the touch sensor must be pressed. So stop the arm motor quickly using the brake stop action.\n # Make a beep sound\n # Now move the arm_motor 14.2 revolutions in the negative direction relative to the current location\n # Note the stop action and speed are already set correctly so we don't need to specify them again\n # Block code execution by waiting for the arm to finish running\n # Make a beep sound\n # Set the arm encoder position to 0 (the last line below is correct to do that, it's new so no bug there)\n\n # Code that attempts to do this task but has MANY bugs (nearly 1 on every line). 
Fix them!\n self.arm_motor.run_forever(speed_sp=900)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action='brake')\n ev3.Sound.beep().wait()\n # time.sleep(2)\n # arm_motor.stop(stop_action='brake')\n\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-arm_revolutions_for_full_range, speed_sp=900)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep()\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this line is correct as is).", "def velocity(self,l=None, t=None):\n raise RuntimeError('this is a virtual method that must be overwritten')\n return", "def velocity_callback(self, msg_velocity):\n if self.last_pose.header.stamp.to_sec() > 0: # skip first frame\n\n dt = (msg_velocity.header.stamp - self.last_pose.header.stamp).to_sec()\n\n # Integrate the relative movement between the last pose and the current\n theta_delta = self.last_theta_dot * dt\n # to ensure no division by zero for radius calculation:\n if np.abs(self.last_theta_dot) < 0.000001:\n # straight line\n x_delta = self.last_v * dt\n y_delta = 0\n else:\n # arc of circle\n radius = self.last_v / self.last_theta_dot\n x_delta = radius * np.sin(theta_delta)\n y_delta = radius * (1.0 - np.cos(theta_delta))\n\n # Add to the previous to get absolute pose relative to the starting position\n theta_res = self.last_pose.theta + theta_delta\n x_res = self.last_pose.x + x_delta * np.cos(self.last_pose.theta) - y_delta * np.sin(self.last_pose.theta)\n y_res = self.last_pose.y + y_delta * np.cos(self.last_pose.theta) + x_delta * np.sin(self.last_pose.theta)\n\n # Update the stored last pose\n self.last_pose.theta = theta_res\n self.last_pose.x = x_res\n self.last_pose.y = y_res\n\n # Stuff the new pose into a message and publish\n msg_pose = Pose2DStamped()\n msg_pose.header = msg_velocity.header\n msg_pose.header.frame_id = self.veh_name\n msg_pose.theta = theta_res\n msg_pose.x = x_res\n msg_pose.y = y_res\n self.pub_pose.publish(msg_pose)\n\n self.last_pose.header.stamp = msg_velocity.header.stamp\n self.last_theta_dot = msg_velocity.omega\n self.last_v = msg_velocity.v", "def apply_velocity(self, angles, velocity, phase, x):\r\n \r\n # VX\r\n v=velocity[0]*self.parameters[\"vx_amplitude\"]\r\n d=(x*2-1)*v\r\n if phase:\r\n angles[\"l_thigh_joint\"]+=d\r\n angles[\"l_ankle_joint\"]+=d\r\n angles[\"r_thigh_joint\"]+=d\r\n angles[\"r_ankle_joint\"]+=d\r\n else:\r\n angles[\"l_thigh_joint\"]-=d\r\n angles[\"l_ankle_joint\"]-=d\r\n angles[\"r_thigh_joint\"]-=d\r\n angles[\"r_ankle_joint\"]-=d\r\n\r\n # VY\r\n v=velocity[1]*self.parameters[\"vy_amplitude\"]\r\n d=(x)*v\r\n d2=(1-x)*v\r\n if v>=0:\r\n if phase:\r\n angles[\"l_hip_joint\"]-=d\r\n angles[\"l_foot_joint\"]-=d\r\n angles[\"r_hip_joint\"]+=d\r\n angles[\"r_foot_joint\"]+=d\r\n else:\r\n angles[\"l_hip_joint\"]-=d2\r\n angles[\"l_foot_joint\"]-=d2\r\n angles[\"r_hip_joint\"]+=d2\r\n angles[\"r_foot_joint\"]+=d2\r\n else:\r\n if phase:\r\n angles[\"l_hip_joint\"]+=d2\r\n angles[\"l_foot_joint\"]+=d2\r\n angles[\"r_hip_joint\"]-=d2\r\n angles[\"r_foot_joint\"]-=d2\r\n else:\r\n angles[\"l_hip_joint\"]+=d\r\n angles[\"l_foot_joint\"]+=d\r\n angles[\"r_hip_joint\"]-=d\r\n angles[\"r_foot_joint\"]-=d\r\n \r\n ## VT\r\n #v=velocity[2]*self.parameters[\"vt_amplitude\"]\r\n #d=(x)*v\r\n #d2=(1-x)*v\r\n #if v>=0:\r\n #if phase:\r\n #angles[\"j_pelvis_l\"]=-d\r\n #angles[\"j_pelvis_r\"]=d\r\n #else:\r\n #angles[\"j_pelvis_l\"]=-d2\r\n #angles[\"j_pelvis_r\"]=d2\r\n #else:\r\n #if 
phase:\r\n #angles[\"j_pelvis_l\"]=d2\r\n #angles[\"j_pelvis_r\"]=-d2\r\n #else:\r\n #angles[\"j_pelvis_l\"]=d\r\n #angles[\"j_pelvis_r\"]=-d\r", "def velocity_cmd_callback(self, data):\n with self.lock:\n self.twist = data", "def loopVehicle( initPos = (40, 40, 0), theta = 3/2*pi, phi = 0, delta = 0.5, W = 4, alpha = 0):\r\n \r\n initAxis = (cos(theta)*cos(phi), sin(theta)*cos(phi), sin(phi))\r\n vehicle = vs.box(pos=initPos, size=(W,W,0.2), color = clr.green, axis = initAxis,\r\n make_trail=True)\r\n vehicle.trail_object.radius = 0.2\r\n vehicle.velocity = vs.vector(initAxis)\r\n deltat = delta\r\n vscale = 8\r\n varr = vs.arrow(pos=vehicle.pos, axis=vscale*vehicle.velocity, color=clr.yellow)\r\n\r\n while True:\r\n vs.rate(1000)\r\n\r\n orthV = makeHoriVector(theta+pi/2)\r\n orthV2 = makeVector(theta, phi+pi/2)\r\n lSensorPos = tuple(int(x) for x in (vehicle.pos+orthV*W/2).astuple())\r\n rSensorPos = tuple(int(x) for x in (vehicle.pos-orthV*W/2).astuple())\r\n #print(rSensorPos)\r\n\r\n if (getDensity(lSensorPos) > getDensity(rSensorPos)):\r\n theta = theta + pi/180\r\n elif getDensity(lSensorPos) < getDensity(rSensorPos):\r\n theta = theta - pi/180\r\n\r\n if (getDensity(lSensorPos) + getDensity(rSensorPos))/2 > thre_den:\r\n return\r\n\r\n vehicle.velocity = makeVector(theta, phi)\r\n \r\n vehicle.pos = vehicle.pos+vehicle.velocity*deltat*sigmoid(getDensity(lSensorPos)+getDensity(rSensorPos), alpha)\r\n vehicle.axis = vehicle.velocity\r\n vehicle.size=(W,W,0.2)\r\n\r\n varr.pos = vehicle.pos\r\n varr.axis = vehicle.velocity*vscale", "def callback_cmdvel(msg):\n global _stop\n rospy.logdebug(\"received cmd_vel: (%f,%f)\", msg.linear.x, msg.angular.z)\n msgtosend = msg\n if _stop and msg.linear.x > 0:\n msgtosend.linear.x = 0\n rospy.logdebug(\"reset cmd_vel(.linear.x)\")\n _pub_cmdvel.publish(msg)", "def setVelocity(self, vfunc):\n n = 0\n if vfunc: n = vfunc.func_id()\n _cantera.wall_setVelocity(self.__wall_id, n)", "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop()\n ev3.Sound.beep().wait()\n arm_revolutions_for_full_range = 14.2 * 360\n self.arm_motor.run_to_rel_pos(\n position_sp=-arm_revolutions_for_full_range,\n speed_sp=self.MAX_SPEED,\n stop_action=ev3.Motor.STOP_ACTION_BRAKE)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this\n # line is correct as is).", "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=900)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action=\"brake\")\n ev3.Sound.beep()\n self.arm_motor.run_to_rel_pos(\n speed_sp=900, position_sp=-5100)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n print('motor is no longer running')\n ev3.Sound.beep()\n self.arm_motor.position = 0", "def update_vehicle_state(self):\n #vel = self.v + self.commands['throttle']/self.m/self.simulation_rate\n\n vel = self.commands['speed']\n steer = self.commands['steering_angle']\n\n if steer > 0.5:\n steer_cmd = 25\n elif steer < -0.5:\n steer_cmd = 185\n else:\n steer_cmd = 100 - 160*steer ##linear\n #steer_cmd = 100 - 640*steer**3 ##cubic\n\n #rospy.logwarn('Velocity command is '+ str(vel))\n # 130 is the lowest vel_cmd that makes the truck move.\n if vel > 12:\n vel_cmd = 161\n elif vel < 0:\n vel_cmd = 0\n else:\n vel_cmd = 3.77*vel + 117\n # rospy.logerr('throttle: ' + str(throttle))\n 
hw_port.set_command(vel_cmd,steer_cmd,self.vehicle_id)", "def velocity(self, *args):\n self.lmp.command('velocity' + (' {}' * len(args)).format(*args))", "def update_velocity(self):\n # Set thruster (up/down) movement\n if self.thrusters:\n self.velocity_y -= self.gravity\n else:\n self.velocity_y += self.velocity_slowing\n\n # Set left movement\n if self.moving_left:\n self.velocity_x -= self.gravity\n else:\n if self.velocity_x < 0:\n self.velocity_x += self.velocity_slowing\n \n # Set right movement\n if self.moving_right:\n self.velocity_x += self.gravity\n else:\n if self.velocity_x > 0:\n self.velocity_x -= self.velocity_slowing", "def _update_vel(self) -> None:\n self.state[:, :, Boids.Attr.VEL] += self.state[:, :, Boids.Attr.ACC]\n self.state[:, :, Boids.Attr.VEL] = maglim(\n self.state[:, :, Boids.Attr.VEL], self.max_vel)", "def robot_cmd_vel_cb(self, msg):\n if self.is_robot_moving and msg.linear.x == 0 and msg.angular.z == 0:\n self.is_robot_moving = False\n self.logger.log('ROS_Moving', 'ROS_Moving', 'STILL', category='state')\n elif not self.is_robot_moving and (msg.linear.x != 0 or msg.angular.z != 0):\n self.is_robot_moving = True\n self.logger.log('ROS_Moving', 'ROS_Moving', 'MOVING', category='state')", "def default_velocity(self) -> int:\r\n ...", "def target_velocity(self, s):\n # YOUR CODE HERE\n if s > self.total_length:\n s = self.total_length\n angular = np.array([0, 0 ,0])\n else:\n angular = np.array([0,0,self.targ_angular])\n\n theta = s/self.total_length*self.angle\n v_x = np.cos(theta)*self.targ_speed\n if self.left_turn:\n v_y = np.sin(theta)*self.targ_speed\n else:\n v_y = -np.sin(theta)*self.targ_speed\n \n linear = np.array([v_x, v_y, 0])\n # linear = np.append(np.dot(self.g, np.array([v_x, v_y, 0]))[:2], [0])\n \n toRet = [linear, angular]\n \n return toRet", "def change_velocity(self, delta):\n self.velocity += delta", "def change_velocity(self, delta):\n self.velocity += delta", "def target_velocity(self, s):\n raise NotImplementedError()", "def update(self):\r\n # change in position -> velocity\r\n self.position += self.velocity\r\n # change in celocity -> acceleration\r\n self.velocity += self.acceleration\r\n \r\n # if velocity magnitude is higher than the defined limit set the velocity \r\n # magnitude to max speed\r\n if np.linalg.norm(self.velocity) > self.max_speed:\r\n self.velocity = self.velocity / np.linalg.norm(self.velocity) * self.max_speed\r\n \r\n # reset the acceleration\r\n self.acceleration = Vector(*np.zeros(2))", "def update(self):\n self.velocity = [math.cos(self.angle), - math.sin(self.angle)]\n self.velocity = [self.speed * i for i in self.velocity]\n\n super().update()", "def setRobotVelocity(self,vel):\n linear = vel[0]\n angular = vel[1]\n if not (self.orientation):\n # Kinematic model for differential robot.\n print(\"or 0\")\n\n wl = (linear - (WHEEL_SEPARATION / 2.) * angular) / WHEEL_DIAMETER\n wr = (linear + (WHEEL_SEPARATION / 2.) * angular) / WHEEL_DIAMETER\n\n # At input 1000, angular velocity is 1 cycle / s or 2*pi/s.\n velLeft = int(wl * BETA/(2*math.pi))\n velRight = int(wr * BETA/(2*math.pi))\n print(\"left: \" + str(velLeft))\n print(\"right: \" + str(velRight))\n self.setMotorSpeed(velRight,velLeft)\n else:\n print(\"or 1\")\n # Kinematic model for differential robot.\n wl = (linear - (WHEEL_SEPARATION / 2.) * angular) / WHEEL_DIAMETER\n wr = (linear + (WHEEL_SEPARATION / 2.) 
* angular) / WHEEL_DIAMETER\n\n # At input 1000, angular velocity is 1 cycle / s or 2*pi/s.\n velLeft = int(wl * BETA/(2*math.pi))\n velRight = int(wr * BETA/(2*math.pi))\n print(\"left: \" + str(velLeft))\n print(\"right: \" + str(velRight))\n self.setMotorSpeed(velRight, velLeft)", "def target_velocity(self, time):\n \"\"\"\n start_point = self.points[self.cur_start]\n cur_target = self.points[(self.cur_start + 1) % 4]\n total_time = self.total_time / 4\n avg_vel = (cur_target - start_point)/ total_time\n return avg_vel\n \"\"\"\n total_time = self.total_time\n if time <= self.total_time/4:\n return self.path1.target_velocity(time)\n\n elif time - total_time/4 == 0:\n rospy.sleep(0.5)\n\n elif time <= self.total_time/2:\n return self.path2.target_velocity(time - (total_time/4 + 0.5))\n\n elif time - total_time/2 == 0:\n rospy.sleep(0.5)\n\n elif time <= self.total_time/4*3:\n return self.path3.target_velocity(time - (total_time/2 + 1))\n\n elif time - total_time/4*3 == 0:\n rospy.sleep(0.5)\n\n\n else:\n return self.path4.target_velocity(time - (total_time/4*3 + 1.5))", "def __init__(self, mass, radius, position, velocity):\n self.mass = mass\n self.radius = radius\n self.position = position\n self.velocity = velocity\n print(self.velocity)\n self.vafter = np.copy(velocity) # temp storage for velocity of next step\n self.delete = False", "def __init__(self, temperature=298 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n super(AndersenVelocityVerletIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n kT = kB * temperature\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"p_collision\", timestep * collision_rate) # per-particle collision probability per timestep\n self.addPerDofVariable(\"sigma_v\", 0) # velocity distribution stddev for Maxwell-Boltzmann (computed later)\n self.addPerDofVariable(\"collision\", 0) # 1 if collision has occured this timestep, 0 otherwise\n self.addPerDofVariable(\"x1\", 0) # for constraints\n\n #\n # Update velocities from Maxwell-Boltzmann distribution for particles that collide.\n #\n self.addComputePerDof(\"sigma_v\", \"sqrt(kT/m)\")\n self.addComputePerDof(\"collision\", \"step(p_collision-uniform)\") # if collision has occured this timestep, 0 otherwise\n self.addComputePerDof(\"v\", \"(1-collision)*v + collision*sigma_v*gaussian\") # randomize velocities of particles that have collided\n\n #\n # Velocity Verlet step\n #\n self.addUpdateContextState()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()", "def set_velocity(self):\r\n if self.direction == 'left':\r\n self.x_vel = -2\r\n else:\r\n self.x_vel = 2\r\n\r\n self.y_vel = 0", "def setRotVelocity(self,r):\n self.rvel = r", "def velocity(self):\n return self.base_platform.velocity", "def get_velocity(self):\n return self.momentum/self.mass", "def xyzArmCallback(msg):\n global robot\n # extract message components and normalize - joystick provides [-100,100] and \n # we will scale to [-0.1,0.1]\n arm_x = msg.data[0]/ARM_DATA_SCALING\n arm_y = msg.data[1]/ARM_DATA_SCALING\n arm_z = msg.data[2]\n\n # conditionally scale Z axis movements\n if(arm_z > 0):\n arm_z = msg.data[2] / ARM_DATA_SCALING_UP_Z\n elif(arm_z < 0):\n arm_z = msg.data[2] / ARM_DATA_SCALING_DOWN_Z\n\n if (arm_x == 0 and 
arm_y == 0 and arm_z == 0):\n #rospy.loginfo(\"no arm movement requested\")\n i=0 #placeholder\n\n else:\n # displacement = np.array([arm_x, arm_y, arm_z])\n # success = robot.arm.move_ee_xyz(displacement, plan=False)\n # rospy.loginfo(\"tried to move arm\")\n displacement = np.array([arm_x, arm_y, arm_z])\n t,r,q = robot.arm.pose_ee\n if (t[2] < min_z):\n rospy.loginfo(\"arm too low, safety protocol activated with z=%s\",str(t[2]))\n elif(t[2] > max_z):\n rospy.loginfo(\"arm too high, safety protocol activated with z=%s\",str(t[2]))\n else:\n translation = np.add(np.asarray(t).flatten(), displacement)\n orientation = np.asarray(r)\n ident = np.eye(3)\n orientation[:,2] = ident[:,2]\n orientation[2,:] = ident[2,:]\n robot.arm.set_ee_pose(translation, orientation, plan=False)\n rospy.loginfo(\"translation was %s\", str(translation))\n rospy.loginfo(\"orientation was %s\", str(orientation))", "def evaluate(self, *args, **kwargs):\n return self.constant_velocity", "def add_velocity(self, Mextra=0, period=0, model=1):\n \n if self.npart == 0:\n self.vel = transpose(array([[],[]]))\n return\n \n print(\" Adding velocities...\")\n \n if model==0: vel = zeros((self.npart, 2))\n \n elif model in [1,2]:\n print(\" Setting keplerian velocities...\")\n pos = self.pos - self.center\n radii = norm(pos, axis=1)\n self.v_kep = sqrt(Mextra * G / radii)\n if model==2: Mextra += sum(self.mass)\n v_kep = sqrt(Mextra * G / radii)\n vel = matmul(pos / radii[:, newaxis], array([[0, 1], [-1, 0]])) * v_kep[:, newaxis]\n \n\n elif model==3:\n print(\" Setting velocities from binary period...\")\n if period==0:\n print(\" Incorrect period for setting disk velocities.\")\n print(\" Disk velocities are set to zero.\")\n vel = zeros((self.npart, 2))\n \n else:\n pos = self.pos - self.center\n v_ang = 1 / float(period) \n vel = v_ang * matmul(pos, array([[0, 1], [-1, 0]]))\n \n else:\n print(\"Model must be 0, 1, 2 or 3.\")\n print(\" {:d} was given. 
Exiting.\".format(model))\n exit()\n \n \n self.vel = vel", "def send_body_ned_velocity(velocity_x, velocity_y, velocity_z, duration=0):", "def reduce_velocity(self):\n if self.controls[\"make_velocity_0\"]:\n # print(self.controls[\"bar_move_velocity\"])\n self.controls[\"bar_move_velocity\"] = 0", "def vel(self, time):\n if (time < self.ti):\n t = 0\n elif (time > self.tf):\n t = self.tf - self.ti\n else:\n t = time - self.ti\n return self.a1 + 2.0 * self.a2 * t + 3.0 * self.a3 * pow(t, 2) + 4.0 * self.a4 * pow(t, 3) + 5.0 * self.a5 * pow(t, 4)", "def test_velocity(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 6.25e-6\n solrt = sol(r, t)\n np.testing.assert_allclose(solrt.velocity[0], 144000.0)", "def update_current(self):\n velocity, horizontal_angle, vertical_angle = self.current_function()\n self.set_current_velocity(velocity, horizontal_angle, vertical_angle)", "def twistCallback(msg):\n global robot\n\n\n # extract message components and scale\n fwdRev = (msg.linear.x)/FWD_REV_SCALING\n spin = (msg.angular.z)/SPIN_SCALING\n\n\n # Reduce cross-coupling of commands\n if (abs(spin)<1 and abs(fwdRev)>20): spin=0\n if (abs(fwdRev) < 5 and abs(spin)>5): fwdRev=0\n\n # Pass command to robot base\n execution_time = EXECUTION_TIME\n \n robot.base.set_vel(fwd_speed=fwdRev, \n turn_speed=spin, \n exe_time=execution_time)", "def get_velocity(self):\n return (self._I85_msg_from_device(self.node.sdo[0x606c].phys)) / 10 # rad/s", "def velocity_step(self, dt, force):\r\n self.vel += dt * force / self.mass", "def exec_velocity_cmd(self, cmd):\n joint_names = self.joint_names()\n\n velocity_command = dict(zip(joint_names, cmd))\n\n self.set_joint_velocities(velocity_command)", "def getVelocity(self):\n\n return self.vel", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n\n # Compute constants.\n kT = kB * temperature\n gamma = collision_rate\n\n # Create a new custom integrator.\n super(VVVRIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"b\", numpy.exp(-gamma * timestep)) # velocity mixing parameter\n self.addPerDofVariable(\"sigma\", 0)\n self.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Allow context updating here.\n #\n self.addUpdateContextState()\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Velocity perturbation.\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Metropolized symplectic step.\n #\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x + v*dt\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m + (x-x1)/dt\")\n self.addConstrainVelocities()\n\n #\n # Velocity randomization\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()", "def dvl_callback(self, msg):\n self.mutex.acquire()\n\n self.ni[0] = msg.velocity_instrument.x\n self.ni[1] = msg.velocity_instrument.y\n self.ni[2] = msg.velocity_instrument.z\n\n 
self.mutex.release()\n rospy.loginfo(\"%s receive dvl\", self.node_name)", "def vel_y(self, *args, **kwargs) -> Any:\n pass", "def listener():\n rospy.Subscriber(\"motion_plan\", FloatList, callback)\n rospy.spin()", "def update_position(self, event):\n\n # Create a copy of the most recent stored twist data to perform calculations\n with self.lock:\n velocity_data = copy.deepcopy(self.twist)\n\n # Time elapsed since last update position call\n if hasattr(event, 'last_real'):\n if event.last_real is None:\n time = rospy.Duration(0.05)\n else:\n time = event.current_real - event.last_real\n \n time = time.to_sec()\n\n # Calculate angle turned in the given time using omega = theta/time\n angle = velocity_data.angular.z*time\n\n # Calculate distance travelled in the given time using linear velocity = arc distance/time\n distance = velocity_data.linear.x*time\n\n # Calculate yaw of the robot\n self.vehicle_yaw += angle\n\n # Calculate vehicle x, y, z position coordinates\n # TODO recalculate the position based on traveling in a circular arc.\n self.pose.position.x += (distance)*cos(self.vehicle_yaw)\n self.pose.position.y += (distance)*sin(self.vehicle_yaw)\n\n # Calculate z position using linear interpolation and create cloud array\n \n # 1. Create ranges to be used in interpolation function\n terrain_points_x = np.arange(0, self.gaussian_array.shape[1]*self.resolution, self.resolution)\n terrain_points_y = np.arange(0, self.gaussian_array.shape[0]*self.resolution, self.resolution)\n\n # 2. Create array of points to be converted to point cloud for vizualization\n terrain_mesh_x, terrain_mesh_y = np.meshgrid(terrain_points_x, terrain_points_y)\n terrain_x = terrain_mesh_x.ravel()\n terrain_y = terrain_mesh_y.ravel()\n terrain_z = self.gaussian_array.ravel()\n terrain_grid_points = np.stack((terrain_x, terrain_y, terrain_z), axis=1)\n\n # 3. Create interpolation function based on the ranges and gaussian data\n interp_func = RectBivariateSpline(terrain_points_y, terrain_points_x, self.gaussian_array)\n\n # 4. Find z value for x and y coordinate of vehicle using interpolation function\n # TODO compute z height based on footprint\n self.pose.position.z = interp_func(self.pose.position.y, self.pose.position.x)\n\n # Convert Euler Angles to Quarternion\n V_rotation = tf.transformations.quaternion_from_euler(0.0, 0.0, self.vehicle_yaw)\n\n # Broadcast vehicle frame which is a child of the world frame\n br = tf.TransformBroadcaster()\n br.sendTransform((self.pose.position.x, self.pose.position.y, self.pose.position.z), \n V_rotation, rospy.Time.now(),\"vehicle_frame\", \"map\")\n\n # Construct the homogenous transformation matrix for map to vehicle frame\n V_translation = [self.pose.position.x, self.pose.position.y, self.pose.position.z]\n map_T_V = tf.transformations.quaternion_matrix(V_rotation) \n map_T_V[:3,3] = np.array(V_translation)\n\n # Create footprint of vehicle\n V_footprint_range_x = np.linspace((-self.vehicle_length/2), (self.vehicle_length/2), 30)\n V_footprint_range_y = np.linspace((-self.vehicle_width/2), (self.vehicle_width/2), 15)\n V_footprint_mesh_x, V_footprint_mesh_y = np.meshgrid(V_footprint_range_x, V_footprint_range_y)\n V_footprint_x = V_footprint_mesh_x.ravel()\n V_footprint_y = V_footprint_mesh_y.ravel()\n\n # For every point in the vehicle footprint, calculate the position wrt to the vehicle's frame\n # and its interpolated z value. 
Add this point to a list of points for visualization.\n # TODO Flatten into a single matrix multiply to remove for loop\n V_viz_points = []\n for i in range(V_footprint_x.shape[0]):\n p = Point()\n V_footprint_point = np.array([[V_footprint_x[i]],[V_footprint_y[i]], [0.0], [1.0]])\n V_footprint_point = np.matmul(map_T_V, V_footprint_point)\n V_footprint_point[2, 0] = interp_func(V_footprint_point[1, 0], V_footprint_point[0, 0])\n p.x = V_footprint_point[0, 0]\n p.y = V_footprint_point[1, 0]\n p.z = V_footprint_point[2, 0]\n V_viz_points.append(p)\n\n #####################################################################################\n # Create a copy of the most recent stored JointState data to perform calculations\n with self.joint_lock:\n joint_data = copy.deepcopy(self.joint)\n\n # If the data is empty on first run, fill with 0.0\n if not joint_data.velocity:\n joint_data.velocity = [0.0,0.0]\n \n # Calculate angle based on velocity data and time\n angle = joint_data.velocity[0]*time\n angle2 = joint_data.velocity[1]*time\n\n self.joint1_pitch += angle\n self.joint2_pitch += angle2\n\n # Transformations from vehicle frame to Joint1 and Joint2\n \n # Static rotation about z-axis \n static_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 3.14159)\n translation = [0.0, 0.0, 0.0]\n V_T_SRz = tf.transformations.quaternion_matrix(static_rot)\n V_T_SRz[:3,3] = np.array(translation)\n\n # Dynamic rotation about the y-axis of Joint 1\n rot_SRz_T_J1 = [[cos(self.joint1_pitch), 0.0, sin(self.joint1_pitch)],\n [0.0, 1.0, 0.0],\n [-sin(self.joint1_pitch), 0.0, cos(self.joint1_pitch)]]\n\n trans_SRz_T_J1 = [0.0, 0.0, 0.0, 1.0]\n\n SRz_T_J1 = np.zeros((4,4))\n SRz_T_J1[:3,:3] = rot_SRz_T_J1\n SRz_T_J1[:4,3] = trans_SRz_T_J1\n\n # Translation based on length of Joint 1 arm \n no_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 0.0)\n translation = [self.joint1_length, 0.0, 0.0]\n J1_T_STx = tf.transformations.quaternion_matrix(no_rot)\n J1_T_STx[:3,3] = np.array(translation)\n\n # Dynamic rotation about y-axis of Joint 2\n dynamic_rot2 = tf.transformations.quaternion_from_euler(0.0, self.joint2_pitch, 0.0)\n translation = [0.0, 0.0, 0.0]\n STx_T_J2 = tf.transformations.quaternion_matrix(dynamic_rot2)\n STx_T_J2[:3,3] = np.array(translation)\n\n # matrix multiplication to form the homogenous matrices\n V_T_J1 = np.matmul(V_T_SRz, SRz_T_J1)\n V_T_STx = np.matmul(V_T_J1, J1_T_STx)\n V_T_J2 = np.matmul(V_T_STx, STx_T_J2)\n\n frame_J1 = tf_conversions.fromMatrix(V_T_J1)\n frame_J2 = tf_conversions.fromMatrix(V_T_J2)\n\n # The ripper tip is a point in the J2's frame, this is based on the length of the ripper\n ripper_tip_point_J2 = [self.ripper_length, 0.0, 0.0, 1.0]\n map_T_J2 = np.matmul(map_T_V, V_T_J2)\n ripper_tip_pt_map = np.matmul(map_T_J2, ripper_tip_point_J2)\n ripper_tip_point_viz = Point()\n ripper_tip_point_viz.x = ripper_tip_pt_map[0]\n ripper_tip_point_viz.y = ripper_tip_pt_map[1]\n ripper_tip_point_viz.z = ripper_tip_pt_map[2]\n V_viz_points.append(ripper_tip_point_viz)\n\n # use the ripper's position as an index value to access the gaussian array\n ripper_tip_cell_index_x = int(ripper_tip_pt_map[1]/self.resolution)\n ripper_tip_cell_index_y = int(ripper_tip_pt_map[0]/self.resolution)\n\n # Create a range of index values surrounding index_x and y\n nearby_index_cells_range_x = np.arange((ripper_tip_cell_index_x-1),(ripper_tip_cell_index_x+2), 1)\n nearby_index_cells_range_y = np.arange((ripper_tip_cell_index_y-1),(ripper_tip_cell_index_y+2), 1)\n nearby_index_cells_mesh_x, 
nearby_index_cells_mesh_y = np.meshgrid(nearby_index_cells_range_x,nearby_index_cells_range_y)\n nearby_index_cells_x = nearby_index_cells_mesh_x.ravel()\n nearby_index_cells_y = nearby_index_cells_mesh_y.ravel()\n\n # First check if the index is within the gaussian array, if it is, then check if the tip of\n # the ripper is beneath the soil, if it is, then remove the soil above the tip and disperse\n # it to the surrounding cells, provided those cells are also within the gaussian array\n # TODO Remove use of for loops and excess if statements\n\n if (0 <= ripper_tip_cell_index_x <= (self.gaussian_array.shape[0]-1)) and (0 <= ripper_tip_cell_index_y <= (self.gaussian_array.shape[1]-1)):\n if (self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] > ripper_tip_pt_map[2]):\n diff = self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] - ripper_tip_pt_map[2]\n for i in range(nearby_index_cells_x.shape[0]):\n if (0 <= nearby_index_cells_x[i] <= (self.gaussian_array.shape[0]-1)) and (0 <= nearby_index_cells_y[i] <= (self.gaussian_array.shape[1]-1)):\n self.gaussian_array[nearby_index_cells_x[i]][nearby_index_cells_y[i]] += diff/8\n self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] = ripper_tip_pt_map[2]\n \n\n # Publish all messages\n self.publish_messages(V_translation, V_rotation, terrain_grid_points, V_viz_points, frame_J1, frame_J2)", "def refSpeed_callback(self, msg):\n self.mutex.acquire()\n\n self.speed_ref[0] = msg.vx\n self.speed_ref[1] = msg.vy\n self.speed_ref[2] = msg.vz\n\n self.mutex.release()\n rospy.loginfo(\"%s receive speed reference\", self.node_name)", "def robot_arm_vel(self):\n return self.sim.data.qvel[self.arm_index]", "def update_vel(self, forces, dt):\n\n for particle, force in zip(self.particles, forces):\n particle.leap_velocity(dt, force)\n return None", "def physics(self):\n\n self.v_y += self.a_y * self.dt # v =at\n dy = self.v_y * self.dt # x = vt\n self.rect.move_ip(0, -dy)", "def target_velocity(self, time):\n\n x_v = self.w*self.r*cos(self.w*time)\n y_v = -self.w*self.r*sin(self.w*time)\n z_v = 0\n # raise NotImplementedError\n return np.array([x_v,y_v,z_v])", "def velocity(self):\n return self._velocity", "def velocity(self):\n return self._velocity", "def vel(track, t_vel, r):\n time = 0\n for msg in track:\n if msg.type == 'note_on' or msg.type=='note_off':\n time += msg.time\n if msg.type == 'note_on':\n if msg.velocity != 0: # To avoid messing with certain mid\n r_mod = t_vel*r\n msg.velocity = rd.randint(max(t_vel - r_mod, 0),\n min(t_vel + r_mod, 127))\n return track", "def velocity_field(xt,yt,x0,y0,Vinf,dia,rot,chord,B,param=None,veltype='all',integration='simp',m=220,n=200):\n rad = dia/2.\n tsr = rad*fabs(rot)/Vinf\n solidity = (chord*B)/rad\n\n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n\n coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9 = coef_val()\n\n # Calculating EMG distribution parameters (based on polynomial surface fitting)\n if param is None:\n loc1 = _parameterval(tsr,solidity,coef0)\n loc2 = _parameterval(tsr,solidity,coef1)\n loc3 = _parameterval(tsr,solidity,coef2)\n spr1 = _parameterval(tsr,solidity,coef3)\n spr2 = _parameterval(tsr,solidity,coef4)\n skw1 = _parameterval(tsr,solidity,coef5)\n skw2 = _parameterval(tsr,solidity,coef6)\n scl1 = _parameterval(tsr,solidity,coef7)\n scl2 = _parameterval(tsr,solidity,coef8)\n scl3 = _parameterval(tsr,solidity,coef9)\n\n else:\n # Reading in EMG distribution parameters\n loc1 = param[0]\n loc2 = param[1]\n 
loc3 = param[2]\n spr1 = param[3]\n spr2 = param[4]\n skw1 = param[5]\n skw2 = param[6]\n scl1 = param[7]\n scl2 = param[8]\n scl3 = param[9]\n\n ###################################\n if veltype == 'vort':\n # VORTICITY CALCULATION (NO INTEGRATION)\n if x0t < 0.:\n vel = 0.\n else:\n vel = _vawtwake.vorticitystrength(x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)/rot\n ###################################\n else:\n # Integration of the vorticity profile to calculate velocity\n if integration == 'simp':\n # SIMPSON'S RULE INTEGRATION (must use polynomial surface coefficients from VAWTPolySurfaceCoef.csv)\n inte = 1 # Simpson's Rule\n # inte = 2 # Trapezoidal Rule (optional ability of the code-- faster but less accurate)\n\n if param is not None:\n print \"**** Using polynomial surface coefficients from VAWTPolySurfaceCoef.csv for Simpson's rule integration ****\"\n\n vel_xs,vel_ys = _vawtwake.vel_field(xt,yt,x0,y0,dia,rot,chord,B,Vinf,coef0,coef1,coef2,coef3,coef4,coef5,coef6,coef7,coef8,coef9,m,n,inte)\n\n if veltype == 'all':\n vel = sqrt((vel_xs*Vinf + Vinf)**2 + (vel_ys*Vinf)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs*Vinf + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])\n ###################################\n elif integration == 'gskr':\n # 21-POINT GAUSS-KRONROD RULE QUADRATURE INTEGRATION\n xbound = (scl3+5.)*dia\n argval = (x0t,y0t,dia,loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3)\n if veltype == 'all' or veltype == 'x' or veltype == 'ind':\n vel_x = _dblquad(_vawtwake.integrandx,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_xs = (vel_x[0]*fabs(rot))/(2.*pi)\n if veltype == 'all' or veltype == 'y' or veltype == 'ind':\n vel_y = _dblquad(_vawtwake.integrandy,0.,xbound,lambda x: -1.*dia,lambda x: 1.*dia,args=argval)\n vel_ys = (vel_y[0]*fabs(rot))/(2.*pi)\n\n if veltype == 'all':\n vel = sqrt((vel_xs + Vinf)**2 + (vel_ys)**2)/Vinf\n elif veltype == 'x':\n vel = (vel_xs + Vinf)/Vinf\n elif veltype == 'y':\n vel = vel_ys/Vinf\n elif veltype == 'ind':\n vel = np.array([vel_xs,vel_ys])/Vinf\n ###################################\n\n return vel", "def current_velocity():\n global current_velocity\n while current_velocity is None:\n pass\n return current_velocity", "def ball_increase_velocity():\n global ball_vel\n ball_vel[0] = ball_vel[0] * 1.10\n ball_vel[1] = ball_vel[1] * 1.10", "def __init__(self, timestep=1.0 * simtk.unit.femtoseconds):\n\n super(VelocityVerletIntegrator, self).__init__(timestep)\n\n self.addPerDofVariable(\"x1\", 0)\n\n self.addUpdateContextState()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()", "def motor_velocities(self):\n return np.asarray(self._robot_state.velocity)", "def calculate_velocity(self, speed):\n self.velocity.dx += math.cos(math.radians(self.angle)) * speed\n self.velocity.dy += math.sin(math.radians(self.angle)) * speed", "def yMotor(self,controlQueue):\n\n #while True:\n if self.yVelocity !=0:\n self.M2.runVelocityT(self.yVelocity,self.timeUnit)\n # if not controlQueue.empty():\n # break", "def controller_routine(self, event=None):\n # pop trajectory if available\n with self.lock:\n vel_commands_in_rob = self.vel_commands_in_rob\n self.vel_commands_in_rob = None\n if vel_commands_in_rob is None:\n return\n # execute trajectory unless a new one 
is requested\n for i in range(len(vel_commands_in_rob)):\n # check if trajectory is stale\n if (i * DT) >= CONTROLLER_ZOMBIE_TIMEOUT:\n cmd_vel_msg = Twist()\n rospy.logwarn(\"Reached controller zombie timeout\")\n cmd_vel_msg.linear.x = 0\n cmd_vel_msg.linear.y = 0\n cmd_vel_msg.angular.z = 0\n self.cmd_vel_pub.publish(cmd_vel_msg)\n return\n # send current trajectory\n with self.lock:\n STOP = self.STOP\n if not STOP:\n vel_x = vel_commands_in_rob[i, 0] * VEL_MULT\n vel_y = vel_commands_in_rob[i, 1] * VEL_MULT\n vel_norm = np.sqrt(vel_x**2 + vel_y**2)\n if vel_norm > self.MAX_VEL:\n vel_x = vel_x / vel_norm * self.MAX_VEL\n vel_y = vel_y / vel_norm * self.MAX_VEL\n # publish cmd_vel\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = vel_x\n cmd_vel_msg.linear.y = vel_y\n cmd_vel_msg.angular.z = vel_commands_in_rob[i, 2] * VEL_MULT\n self.cmd_vel_pub.publish(cmd_vel_msg)\n # sleep for DT\n rospy.sleep(DT)\n # check if a newer trajectory is available\n with self.lock:\n if self.vel_commands_in_rob is not None:\n return", "def velocity(self) -> np.ndarray:\n return self._state[3:5]", "def set_hand_vel(self,vel):\n # Calculate joint velocities to achieve desired velocity\n joint_vels=np.dot(self._kin.jacobian_pseudo_inverse(),vel)\n joints=dict(zip(self._arm.joint_names(),(joint_vels)))\n\n self._arm.set_joint_velocities(joints)", "def velocity(self, X, Y):\n self.u = self.Vinf * np.ones_like(X)\n self.v = np.zeros_like(X)", "def setVelocity(self,v):\n if v is None:\n self.v = Cartesian3DVector()\n else:\n if isinstance(v,Cartesian3DVector):\n self.v = Cartesian3DVector(v.x,v.y,v.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect velocity vector type.\")", "def velocity(self, X, Y):\n self.u = (self.strength / (2 * np.pi) *\n (X - self.xc) / ((X - self.xc)**2 + (Y - self.yc)**2))\n self.v = (self.strength / (2 * np.pi) *\n (Y - self.yc) / ((X - self.xc)**2 + (Y - self.yc)**2))", "def __init__(self, maneuver_velocity_setpoint,\n maneuver_reference_frame,\n maneuver_duration):\n\n # Create node with name 'translation_controller' and set update rate\n rospy.init_node('translation_controller')\n\n # A publisher which will publish the desired linear and anglar velocity to the topic '/.../cmd_vel_unstamped'\n self.vel_setpoint_pub = rospy.Publisher('/mavros/setpoint_velocity/cmd_vel_unstamped', Twist, queue_size = 1)\n self.vel_setpoint_bu_lenu__lenu = Twist()\n\n # A subscriber to the topic '/mavros/state'. 
self.state is called when a message of type 'State' is recieved\n self.state_sub = rospy.Subscriber(\"/mavros/state\", State, self.state_cb)\n self.current_state = State()\n self.prev_state = State()\n\n # A subscriber to the /mavros/local_position/pose topic that is used to access the transform between the body-up\n # and local ENU frames\n self.pose_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, self.pose_sub_cb)\n self.q_bu_lenu = None\n\n self.rate = rospy.Rate(Constants.RATE)\n self.offboard_point_streaming = False\n self.static_transforms = StaticTransforms()\n self.maneuver_velocity_setpoint = maneuver_velocity_setpoint\n self.maneuver_reference_frame = maneuver_reference_frame\n self.maneuver_duration = maneuver_duration", "def currentstate_callback(self, odom):\n self.CurrentPosition = np.array([odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z])\n self.CurrentVelocity = np.array([odom.twist.twist.linear.x, odom.twist.twist.linear.y, odom.twist.twist.linear.z])", "def LeapFrog(self,r,v,dt):\n\n rhalf = r + np.asarray(v)*(dt/2) #Taking a half step forward with positional vector\n # predict the final velocity at the next timestep using the acceleration field at the rhalf position \n vnew = v + self.M31Accel(rhalf)*dt\n # predict the final position using the average of the current velocity and the final velocity\n rnew = r + 0.5*(v+vnew)*dt\n \n return rnew,vnew", "def input(self):\n\n self.vx, self.vy = 0, 0\n\n game = self.game\n\n x_axis = game.get_axis(0)\n if abs(x_axis) < JOYSTICK_THRESHOLD:\n x_axis = 0\n y_axis = game.get_axis(1)\n if abs(y_axis) < JOYSTICK_THRESHOLD:\n y_axis = 0\n\n if game.get_vbutton_down('left'):\n x_axis = -1\n elif game.get_vbutton_down('right'):\n x_axis = 1\n if game.get_vbutton_down('up'):\n y_axis = -1\n elif game.get_vbutton_down('down'):\n y_axis = 1\n elif game.get_vbutton_down('top_left'):\n x_axis = -1\n y_axis = -1\n elif game.get_vbutton_down('top_right'):\n x_axis = 1\n y_axis = -1\n elif game.get_vbutton_down('bottom_left'):\n x_axis = -1\n y_axis = 1\n elif game.get_vbutton_down('bottom_right'):\n x_axis = 1\n y_axis = 1\n\n # Check for collisions\n if self.get_obstacles(self.spd * x_axis, 0):\n x_axis = 0\n if self.get_obstacles(0, self.spd * y_axis):\n y_axis = 0\n\n self.vx = self.spd * x_axis\n self.vy = self.spd * y_axis\n\n if y_axis != 0:\n self.last_movey = y_axis\n self.last_movex = 0\n elif x_axis != 0:\n self.last_movex = x_axis\n self.last_movey = 0\n\n # diagonals\n if self.vx != 0 and self.vy != 0:\n self.vx *= 0.707\n self.vy *= 0.707\n\n if game.get_vbutton_jp('drop') or game.get_joystick_jp(J_BUTTONS['X']):\n self.drop_item()\n elif game.get_vbutton_jp('pickup') or game.get_joystick_jp(J_BUTTONS['A']):\n self.pickup_items()\n\n return self.is_moving()", "def getVelocity(self):\n\t\tif len(self.prevPositions) < 2:\n\t\t\tself.velocity = 0\n\t\telse:\n\t\t\ttime = self.position[2] - self.prevPositions[len(self.prevPositions)-1][2]\n\t\t\txdist = self.position[0][0] - self.prevPositions[len(self.prevPositions)-1][0][0]\n\t\t\tydist = self.position[0][1] - self.prevPositions[len(self.prevPositions)-1][0][1]\n\t\t\tself.velocity = (xdist,ydist,time.total_seconds())\n\t\treturn self.velocity\n\t\t\t#speed = math.pow(math.pow(1.0*xdist,2) + math.pow(1.0*ydist,2),0.5) / (1.0*time.total_seconds())", "def VerletHope2(r, v, beta,dt,R_dust,M_dust):\n # Deceptively simple (read about Velocity Verlet on wikipedia)\n r_new = r + v*dt + 
calculate_acceleration2(r,v,beta,omega,R_dust,M_dust)*dt**2/2\n v_new = v + (calculate_acceleration2(r,v,beta,omega,R_dust,M_dust) + calculate_acceleration2(r_new,v,beta,omega,R_dust,M_dust))/2 * dt\n \n return (r_new, v_new)", "def loop(self):\n rospy.loginfo(\"Hexapod entering main loop...\")\n rospy.loginfo(\" Waiting for initial velocity command on /hexapod/cmd_vel/ ...\")\n while self.last_vel_cmd is None:\n self._loop_rate.sleep()\n\n # start main loop\n while not rospy.is_shutdown():\n\n chassis_pos_delta = None\n if self.last_vel_cmd is not None:\n dt = 1 # FIXME: Temporary for debugging\n lin_disp_lmt = self.linear_displacement_limit\n ang_disp_lmt = self.angular_displacement_limit\n chassis_pos_delta = Twist()\n chassis_pos_delta.linear.x = clamp(self.last_vel_cmd.linear.x*dt, -lin_disp_lmt, lin_disp_lmt)\n chassis_pos_delta.linear.y = clamp(self.last_vel_cmd.linear.y*dt, -lin_disp_lmt, lin_disp_lmt)\n chassis_pos_delta.linear.z = clamp(self.last_vel_cmd.linear.z*dt, -lin_disp_lmt, lin_disp_lmt)\n chassis_pos_delta.angular.x = clamp(self.last_vel_cmd.angular.x*dt, -ang_disp_lmt, ang_disp_lmt)\n chassis_pos_delta.angular.y = clamp(self.last_vel_cmd.angular.y*dt, -ang_disp_lmt, ang_disp_lmt)\n chassis_pos_delta.angular.z = clamp(self.last_vel_cmd.angular.z*dt, -ang_disp_lmt, ang_disp_lmt)\n self.last_vel_cmd = None\n\n if chassis_pos_delta is not None \\\n and not self._check_if_twist_msg_is_zero(chassis_pos_delta, linear_threshold=0.005, angular_threshold=0.01):\n # Get chassis position transformation\n chassis_pos_rot = transforms.euler_matrix(chassis_pos_delta.angular.x,\n chassis_pos_delta.angular.y,\n chassis_pos_delta.angular.z)[:3,:3]\n\n rospy.loginfo(\"chassis_pos_rot: %s\", chassis_pos_rot)\n chassis_pos_trans = np.zeros([3])\n chassis_pos_trans[0] = chassis_pos_delta.linear.x\n chassis_pos_trans[1] = chassis_pos_delta.linear.y\n chassis_pos_trans[2] = chassis_pos_delta.linear.z\n chassis_translation = np.dot(chassis_pos_trans, chassis_pos_rot)\n rospy.loginfo(\"chassis_translation: %s\", chassis_translation)\n\n leg_target_eff_translation = [[]]*LEGS\n # Get leg base positions relative to chassis\n leg_base_positions = self._get_base_to_leg_base_fk()\n for i, leg_base_pos in enumerate(leg_base_positions):\n leg_base_pos_arr = np.array(leg_base_pos).reshape(3,1)\n leg_base_pos_arr_new = np.dot(chassis_pos_rot, leg_base_pos_arr)\n leg_base_pos_trans_4 = np.ones(4).reshape(4,1)\n leg_base_pos_trans_4[:3,:] = leg_base_pos_arr_new\n # get leg base translations relative to leg_base coordinate frame\n relative_trans = np.dot(np.linalg.inv(self.kdl_fk_base_to_leg_base[i].forward([])), leg_base_pos_trans_4)\n relative_trans = relative_trans.reshape(1,4).tolist()[0][:3]\n leg_target_eff_translation[i] = relative_trans\n\n # Get leg target end-effector translations\n for i, q in enumerate(self.leg_jt_home_pos):\n base_to_leg_base_rot = self.kdl_fk_base_to_leg_base[i].forward([])[:3,:3]\n leg_target_eff_trans = np.dot(np.linalg.inv(base_to_leg_base_rot),chassis_translation).tolist()[0]\n leg_target_eff_translation[i] = [x+y for x,y in zip(leg_target_eff_translation[i], leg_target_eff_trans)] # TODO: FIXME: Technically incorrect\n\n # 1: side_alpha legs lift, plant to +transformation\n rospy.loginfo(\"1: side_alpha legs lift, plant to +transformation\")\n if self._odd_starts:\n active_legs = [1,2,5]\n else: # even starts\n active_legs = [0,3,4]\n\n init_wp = WaypointMsg()\n lift_wp = WaypointMsg()\n end_wp = WaypointMsg()\n\n legs_jt_init_pos = self._get_joint_angles()\n 
leg_eff_cur_pos = self._get_leg_base_to_eff_fk(legs_jt_init_pos)\n for i in range(LEGS):\n motor_names = [name for name in self.hebi_mapping[i]]\n # INITIAL POSITION\n init_wp.names.extend(motor_names)\n init_wp.positions.extend(legs_jt_init_pos[i])\n init_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n init_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n # LIFT\n lift_wp.names.extend(motor_names)\n if i in active_legs:\n # apply translation\n leg_lift_eff_target_pos = [(x + y + z) / 2.0 for x, y, z in zip(leg_eff_cur_pos[i], self.leg_eff_home_pos[i], leg_target_eff_translation[i])]\n leg_lift_eff_target_pos = [x + y for x,y in zip(leg_lift_eff_target_pos, self.leg_eff_step_height[i])]\n # get ik\n leg_lift_jt_target_pos = self._get_pos_ik(self.trac_ik_leg_base_to_end[i], legs_jt_init_pos[i],\n leg_lift_eff_target_pos, seed_xyz=self.leg_eff_home_pos[i])\n lift_wp.positions.extend(leg_lift_jt_target_pos)\n lift_wp.velocities.extend([NAN]*ACTUATORS_PER_LEG)\n lift_wp.accelerations.extend([NAN]*ACTUATORS_PER_LEG)\n else:\n lift_wp.positions.extend(legs_jt_init_pos[i])\n lift_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n lift_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n # PLANT\n end_wp.names.extend(motor_names)\n if i in active_legs:\n # apply translation\n leg_plant_eff_target_pos = [x + y for x,y in zip(self.leg_eff_home_pos[i], leg_target_eff_translation[i])]\n leg_plant_eff_target_pos[2] = self.leg_eff_home_pos[i][2] # end eff z-position should match home z-position\n # get ik\n leg_plant_jt_target_pos = self._get_pos_ik(self.trac_ik_leg_base_to_end[i], leg_lift_jt_target_pos,\n leg_plant_eff_target_pos, seed_xyz=leg_lift_eff_target_pos)\n end_wp.positions.extend(leg_plant_jt_target_pos)\n end_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n end_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n else:\n end_wp.positions.extend(legs_jt_init_pos[i])\n end_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n end_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n\n goal = TrajectoryGoal()\n goal.waypoints.append(init_wp)\n goal.waypoints.append(lift_wp)\n goal.waypoints.append(end_wp)\n goal.times.extend([0.0, 0.4, 0.8])\n\n self.release_pos([1,2,3,4,5,6])\n self.trajectory_action_client.send_goal(goal)\n self.trajectory_action_client.wait_for_result()\n self.hold_pos([1,2,3,4,5,6])\n\n # 2: side_alpha legs push to new home positions; side_beta legs push to -transformation\n rospy.loginfo(\"2: side_alpha legs push to new home positions; side_beta legs push to -transformation\")\n if self._odd_starts:\n active_legs = [0,3,4]\n else: # even starts\n active_legs = [1,2,5]\n\n init_wp = WaypointMsg()\n end_wp = WaypointMsg()\n\n legs_jt_init_pos = self._get_joint_angles()\n for i in range(LEGS):\n motor_names = [name for name in self.hebi_mapping[i]]\n # INITIAL POSITION\n init_wp.names.extend(motor_names)\n init_wp.positions.extend(legs_jt_init_pos[i])\n init_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n init_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n # PUSH\n end_wp.names.extend(motor_names)\n if i in active_legs:\n # apply -translation\n leg_plant_eff_target_pos = [x + y for x,y in zip(self.leg_eff_home_pos[i], [-val for val in leg_target_eff_translation[i]])]\n leg_plant_eff_target_pos[2] = self.leg_eff_home_pos[i][2] # end eff z-position should match home z-position\n # get ik\n leg_plant_jt_target_pos = self._get_pos_ik(self.trac_ik_leg_base_to_end[i], legs_jt_init_pos[i],\n leg_plant_eff_target_pos, seed_xyz=self.leg_eff_home_pos[i])\n 
end_wp.positions.extend(leg_plant_jt_target_pos)\n end_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n end_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n else:\n end_wp.positions.extend(self.leg_jt_home_pos[i])\n end_wp.velocities.extend([0.0]*ACTUATORS_PER_LEG)\n end_wp.accelerations.extend([0.0]*ACTUATORS_PER_LEG)\n\n goal = TrajectoryGoal()\n goal.waypoints.append(init_wp)\n goal.waypoints.append(end_wp)\n goal.times.extend([0.0, 0.4])\n\n self.release_pos([1,2,3,4,5,6])\n self.trajectory_action_client.send_goal(goal)\n self.trajectory_action_client.wait_for_result()\n self.hold_pos([1,2,3,4,5,6])\n\n self._odd_starts = not self._odd_starts\n\n self._loop_rate.sleep() # FIXME: Doesn't make sense to use this unless re-planning trajectories\n # end main loop", "def sent_velocity(self,velocity):\n if self.mode == 3: # Profiled Velocity\n self.node.sdo[0x6040].bits[0] = 1\n self.node.sdo[0x6040].bits[1] = 1\n self.node.sdo[0x6040].bits[2] = 1\n self.node.sdo[0x6040].bits[3] = 1\n # self.node.sdo[0x6040].bits[7] = 0\n velocity = 10 * self._I85_msg_to_device(velocity)\n self.node.sdo.download(0x60ff, 0x0, self._decTohex_32(velocity)) # velocity", "def velocity(self,level='cell'):\r\n\r\n # 每个section中总是储存t+1时刻的volume,t到t+1的flow,即一个仿真步长(step)过程中的流量和仿真步长结束时的元胞中车辆数\r\n # 但计算速度需要用到仿真步长开始时的元胞密度,因此要对应时刻的元胞中车辆数vol_t = Vol_t+1 + outflow_t - inflow_t \r\n vels = []\r\n vols = self.last_sim_step_volume()\r\n \r\n if level=='cell':\r\n # 计算第一个元胞\r\n vol = vols[0]\r\n outflow = self.flows[0]\r\n if vol == 0 :\r\n vels.append(0)\r\n else :\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n \r\n # 计算中间元胞\r\n for i in range(1,self.cells_number-1):\r\n vol = vols[i]\r\n outflow = self.flows[i]\r\n if vol == 0 :\r\n vels.append(0)\r\n else:\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n\r\n # 计算最后一个元胞\r\n vol = vols[-1]\r\n outflow = self.outflow\r\n if vol==0:\r\n vels.append(0)\r\n else:\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n \r\n return vels\r\n \r\n elif level=='section': \r\n # 先计算每一个元胞的再按照volume计算加权平均\r\n \r\n # 计算第一个元胞\r\n vol = vols[0]\r\n outflow = self.flows[0]\r\n if vol == 0 :\r\n vels.append(0)\r\n else :\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n \r\n # 计算中间元胞\r\n for i in range(1,self.cells_number-1):\r\n vol = vols[i]\r\n outflow = self.flows[i]\r\n if vol == 0 :\r\n vels.append(0)\r\n else:\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2))\r\n\r\n # 计算最后一个元胞\r\n vol = vols[-1]\r\n outflow = self.outflow\r\n if vol==0:\r\n vels.append(0)\r\n else:\r\n vel = outflow*3600/(vol/self.cell_length)\r\n vels.append(round(vel,2)) \r\n\r\n \r\n # 将速度按照volume加权平均\r\n weighted_vels = [vel*vol for vel, vol in zip(vels,vols)]\r\n sum_vol = sum(vols)\r\n if sum_vol == 0:\r\n avg_vel = 0\r\n else:\r\n avg_vel = round(sum(weighted_vels)/sum_vol,2)\r\n \r\n return avg_vel\r\n\r\n\r\n else :\r\n raise ValueError('no such level for collecting data')", "def accelerate(self):\n\t\tself.velocity += self.direction * self.ACCELERATION", "def cmd_callback(data, car):\n vel = int(data.velocity)\n steer = int(data.steer)\n vel = np.min([np.max([vel, -100]), 100])\n steer = np.min([np.max([steer, -100]), 100])\n\n if car.is_connected():\n # rospy.loginfo('CarAct: [' + rospy.get_caller_id() +\n # ']: v=%f, s=%f', vel, steer)\n car.command_car(vel, steer)\n else:\n print(\"Something is wrong with serial connection\")", "def update_motor_speeds(self, 
event):\n \n # Determine the time step for differentiation and integration\n current_time = rospy.get_time()\n dt = current_time - self.old_time\n \n # Get the motor desired speeds from the onboard controller\n motor_control = self.onboard_controller.get_control_input(dt)\n [front_left, front_right, rear_left, rear_right] = motor_control\n \n # Set the motor_cmd with the controller values\n self.vel_prop_msg.motor_cmd = [front_left, front_right, rear_left, rear_right]\n\n # Publish the motor commands for the ardrone plugin\n self.pub_vel_prop.publish(self.vel_prop_msg)\n \n # Set the old time to the current for future time step calculations\n self.old_time = current_time", "def __init__(\n self,\n velocity_north_m_s,\n velocity_east_m_s,\n velocity_down_m_s):\n self.velocity_north_m_s = velocity_north_m_s\n self.velocity_east_m_s = velocity_east_m_s\n self.velocity_down_m_s = velocity_down_m_s", "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "def vel_x(self, *args, **kwargs) -> Any:\n pass" ]
[ "0.79091567", "0.76079005", "0.7603583", "0.74135023", "0.72956824", "0.7251918", "0.72338396", "0.7038976", "0.7038976", "0.6989811", "0.69450855", "0.693247", "0.68990725", "0.68878675", "0.6857321", "0.68244934", "0.68088895", "0.67901736", "0.6790032", "0.6769608", "0.6767587", "0.6736919", "0.6685146", "0.6684473", "0.6657189", "0.66459566", "0.6606279", "0.6576455", "0.656805", "0.65562385", "0.65256363", "0.6477236", "0.645893", "0.6456584", "0.6456584", "0.6455062", "0.6445882", "0.6430806", "0.6407512", "0.63960195", "0.63491726", "0.63361794", "0.63272345", "0.63189983", "0.6302409", "0.62948954", "0.6284564", "0.6270373", "0.6260661", "0.62597984", "0.62485224", "0.62476367", "0.62106395", "0.6200862", "0.6185471", "0.6166114", "0.61656636", "0.6161824", "0.6154784", "0.61532444", "0.6149986", "0.6141615", "0.6133219", "0.6129686", "0.6115473", "0.6103058", "0.60980767", "0.6089765", "0.60892504", "0.6088393", "0.6088393", "0.60822445", "0.6082017", "0.60778826", "0.6052995", "0.6047482", "0.60382664", "0.60065365", "0.59895504", "0.59884804", "0.59834886", "0.5970342", "0.59677285", "0.5965703", "0.5964846", "0.5955841", "0.594996", "0.59496945", "0.5948425", "0.59345365", "0.5923672", "0.59192014", "0.59190917", "0.5916338", "0.5915011", "0.5907747", "0.5905844", "0.59035134", "0.5895176", "0.5892785" ]
0.65173507
31
position callback for the arm
def get_position(self, message):
        #print('**************** pos ')
        self.position = message.data
        self.state[self.ndegres:] = self.position[0:self.ndegres]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onmove(self, event):", "def xyzArmCallback(msg):\n global robot\n # extract message components and normalize - joystick provides [-100,100] and \n # we will scale to [-0.1,0.1]\n arm_x = msg.data[0]/ARM_DATA_SCALING\n arm_y = msg.data[1]/ARM_DATA_SCALING\n arm_z = msg.data[2]\n\n # conditionally scale Z axis movements\n if(arm_z > 0):\n arm_z = msg.data[2] / ARM_DATA_SCALING_UP_Z\n elif(arm_z < 0):\n arm_z = msg.data[2] / ARM_DATA_SCALING_DOWN_Z\n\n if (arm_x == 0 and arm_y == 0 and arm_z == 0):\n #rospy.loginfo(\"no arm movement requested\")\n i=0 #placeholder\n\n else:\n # displacement = np.array([arm_x, arm_y, arm_z])\n # success = robot.arm.move_ee_xyz(displacement, plan=False)\n # rospy.loginfo(\"tried to move arm\")\n displacement = np.array([arm_x, arm_y, arm_z])\n t,r,q = robot.arm.pose_ee\n if (t[2] < min_z):\n rospy.loginfo(\"arm too low, safety protocol activated with z=%s\",str(t[2]))\n elif(t[2] > max_z):\n rospy.loginfo(\"arm too high, safety protocol activated with z=%s\",str(t[2]))\n else:\n translation = np.add(np.asarray(t).flatten(), displacement)\n orientation = np.asarray(r)\n ident = np.eye(3)\n orientation[:,2] = ident[:,2]\n orientation[2,:] = ident[2,:]\n robot.arm.set_ee_pose(translation, orientation, plan=False)\n rospy.loginfo(\"translation was %s\", str(translation))\n rospy.loginfo(\"orientation was %s\", str(orientation))", "def on_position_change(self) -> None:\n pass", "def get_position(self, position):", "def _callback(self, data):\n # Retrieve data.\n vehicle_id = data.id\n x = data.x\n y = data.y\n theta = data.theta\n vel = data.v\n\n self.positions[vehicle_id] = [x, y, theta, vel]", "def movement(self):", "def motorPositionChanged(self, absolutePosition):\n pass", "def GetPosition(self):\n ...", "def position(self):\r\n pass", "def armLocation(self,length, theta, position = [0,0]):\n #print \"Angle:\",theta\n \n width = 300\n dx = 125\n #dy = 40\n bumpx = 150\n bumpy = length/2\n #width = 300\n \n #dx = 175\n dy = 170\n \n #p1 = (position[0]+dx*cos(theta)+dy*cos(pi/2 - theta),position[1]-dx*sin(theta)+dy*sin(pi/2 - theta))\n #p2 = (p1[0]-length*sin(theta),p1[1]-length*cos(theta))\n #p3 = (p2[0]-width*cos(theta),p2[1]+width*sin(theta))\n #p4 = (p3[0]+length*sin(theta),p3[1]+length*cos(theta))\n\n p1 = (position[0]+dx*cos(theta)+dy*cos(pi/2 - theta),position[1]-dx*sin(theta)+dy*sin(pi/2 - theta))\n p2 = (p1[0]-length*sin(theta),p1[1]-length*cos(theta))\n p3 = (p2[0]-(width+bumpx)*cos(theta),p2[1]+(width+bumpx)*sin(theta))\n p4 = (p3[0]+bumpy*sin(theta),p3[1]+bumpy*cos(theta))\n p5 = (p4[0]+bumpx*cos(theta),p4[1]-bumpx*sin(theta))\n p6 = (p5[0]+(length-bumpy)*sin(theta),p5[1]+(length-bumpy)*cos(theta))\n\n \n #plt.plot([p1[0], p2[0], p3[0], p4[0], p1[0]], [p1[1], p2[1], p3[1], p4[1], p1[1]])\n #plt.axis([-700, 700, -200, 700])\n #plt.show()\n return [p1, p2, p3, p4, p5, p6]", "def on_position(self, position: PositionData):\n # self.on_event(EVENT_POSITION, position)\n # self.on_event(EVENT_POSITION + position.vt_symbol, position)\n pass", "def position_changed(self, position):\n pass", "def register_position_receiver(self, callback):\n\n # Register the handler\n self._registered_handlers.append(callback)", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def getPosition(*args):", "def pos(self, *args, **kwargs) -> Any:\n pass", 
"def update_position(position):\n pass", "def _get_position(self):\n\n rospy.Subscriber(self._goalTopic, Position, self._goal_callback, queue_size=1)\n rospy.Subscriber(self._objectTopic, ObjectsAtPosition,\n self._object_callback, queue_size=1)\n\n rospy.Subscriber(self._laserTopic, LaserAtPosition,\n self._laser_callback, queue_size=1)\n\n plt.show(block=True)", "def _target_callback(self, msg):\n self.target_pose = np.asarray(msg.pos)[np.newaxis].T\n self.target_vel = np.asarray(msg.vel)[np.newaxis].T\n self.target_acc = np.asarray(msg.acc)[np.newaxis].T\n\n print(\"\\nGoing to:\")\n print(\"Pos: \\n\" + str(self.target_pose))\n print(\"Vel: \\n\" + str(self.target_vel))\n print(\"Acc: \\n\" + str(self.target_acc))", "def move_to_position2(self):", "def seek_behaviour(self):\n x, y = (int (self.posicao[0]),int(self.posicao[1]))\n nx,ny = tuple(pontos[self.nextpoint])\n rot = self.rotacao\n direction = pontos[self.nextpoint]-self.posicao", "def callback(self):\n logger.debug( \"beginning %s callback\" % self.hardwareActionName) \n if not self.initialised:\n return \"%s not initialised with init function. Cannot be called back until initialised. Doing nothing\" % self.hardwareActionName\n try:#could update so that it doesn't send the message if the position is the samee, but the motor does that check automatically\n self.finalVariables = self.mapVariables()\n horizontalPosition = int(self.finalVariables[\"SnakePicoPlugHorizontalPosition\"])\n verticalPosition = int(self.finalVariables[\"SnakePicoPlugVerticalPosition\"])\n \n self.picomotor.absoluteMove(self.horizontalAxis, horizontalPosition)\n self.picomotor.absoluteMove(self.verticalAxis, verticalPosition)\n return \" %s : moved horizontal axis to pos=%s and vertical axis to pos=%s\" % (self.hardwareActionName,horizontalPosition,verticalPosition) \n except KeyboardInterrupt:\n raise\n except KeyError as e:\n return \"Failed to find variable %s in variables %s. Check variable is defined in experiment control \" % (e.message, self.variablesReference.keys())\n except Exception as e:\n return \"Failed to perform callback on %s. 
Error message %s\" % (self.hardwareActionName, e.message)", "def move_to_position1(self):", "def __position_jaw_current1_cb(self, data):\n self.__position_jaw_current[0] = data.position[0]\n self.__get_jaw_event[0].set()", "def currentstate_callback(self, odom):\n self.CurrentPosition = np.array([odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z])\n self.CurrentVelocity = np.array([odom.twist.twist.linear.x, odom.twist.twist.linear.y, odom.twist.twist.linear.z])", "def joint_callback(data):\n joints[0] = data.position[9]\n joints[1] = data.position[10]\n joints[2] = data.position[11]\n joints[3] = data.position[12]\n joints[4] = data.position[13]\n global position_geted\n position_geted = True", "def move_to(self, x_pos, y_pos, z_pos):\n def ik_angles(X_Pos,Y_Pos,Z_Pos,Roll,Pitch,Yaw):\n \"\"\"\n Compute the joint angles needed to place the robot arm in a given pose.\n \"\"\"\n limb_side = 'left'\n ns = \"ExternalTools/\" + limb_side + \"/PositionKinematicsNode/IKService\"\n iksvc = rospy.ServiceProxy(ns, SolvePositionIK)\n ikreq = SolvePositionIKRequest()\n hdr = Header(stamp=rospy.Time.now(), frame_id='base')\n quat = tf.transformations.quaternion_from_euler(float(Roll),float(Pitch),float(Yaw))\n poses = {\n 'left': PoseStamped(\n header=hdr,\n pose=Pose(\n position=Point(\n\t\t x=float(X_Pos),\n y=float(Y_Pos),\n z=float(Z_Pos),\n ),\n orientation=Quaternion(\n\t\t x = quat[0],\n\t\t y = quat[1],\n\t\t z = quat[2],\n\t\t w = quat[3],\n\t\t )\n )\n )\n }\n\n ikreq.pose_stamp.append(poses[limb_side])\n try:\n rospy.wait_for_service(ns, 5.0)\n resp = iksvc(ikreq)\n except (rospy.ServiceException, rospy.ROSException), e:\n rospy.logerr(\"Service call failed: %s\" % (e,))\n return 1\n\n # Check if result valid, and type of seed ultimately used to get solution\n # convert rospy's string representation of uint8[]'s to int's\n resp_seeds = struct.unpack('<%dB' % len(resp.result_type),\n resp.result_type)\n if (resp_seeds[0] != resp.RESULT_INVALID):\n # Format solution into Limb API-compatible dictionary\n limb_joints = dict(zip(resp.joints[0].name, resp.joints[0].position))\n return limb_joints \n\n else:\n print(\"INVALID POSE - No Valid Joint Solution Found.\")\n\n return 0\n \n roll = 0\n pitch = 3.14\n yaw = 0 #controls roll of gripper\n\n #compute required joint angles\n angles = ik_angles(x_pos,y_pos,z_pos,roll,pitch,yaw)\n\n #move left limb to position\n limb = baxter_interface.Limb('left')\n limb.move_to_joint_positions(angles)\n \n #update current position\n self.x = x_pos\n self.y = y_pos\n self.z = z_pos\n \n return [x_pos, y_pos]", "def target_position(self, time):\n pass", "def target_position(self, time):\n pass", "def get_motor_position(self):\n print(\"voici la position du moteur\")", "def __position_jaw_current2_cb(self, data):\n self.__position_jaw_current[1] = data.position[0]\n self.__get_jaw_event[1].set()", "def set_position(self, updated):\n self.buff_x = updated[0]\n self.buff_y = updated[1]", "def move(self, p):\r\n self.position.setvalue(p)", "def set_position(self, position):\n self.gripper_io.set_signal_value(\"position_m\", position)", "def mousePosition(self):", "def setPosition(position):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def move(self):\n \n self.position = self.wander()", 
"def move_arm_to_position(self, desired_arm_position):\r\n\r\n\r\n # ---------------------------------------------------------------------\r\n # Done: 7. Implement this method, WITH YOUR INSTRUCTOR.\r\n # ---------------------------------------------------------------------\r\n if self.is_calibrated == False:\r\n self.calibrate_arm()\r\n\r\n if desired_arm_position >= self.arm_motor.get_position():\r\n self.arm_motor.turn_on(self.speed)\r\n while True:\r\n if self.arm_motor.get_position() >= desired_arm_position:\r\n break\r\n\r\n else:\r\n self.arm_motor.turn_on(self.speed*-1)\r\n while True:\r\n if self.arm_motor.get_position() <= desired_arm_position:\r\n break\r\n\r\n self.arm_motor.turn_off()", "def handleMove(self):\n pass", "def player_movement(self):", "def odom_callback(data):\n global x\n global y\n global theta\n x = data.pose.pose.position.x\n y = data.pose.pose.position.y\n rot_q = data.pose.pose.orientation\n (roll, pitch, theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])", "def timerCallback(self,evprent):\n self._odom_list.waitForTransform('map', 'base_footprint', rospy.Time(0), rospy.Duration(1.0))\n (position, orientation) = self._odom_list.lookupTransform('map','base_footprint', rospy.Time(0)) #finds the position and oriention of two objects relative to each other (hint: this returns arrays, while Pose uses lists)\n self._current.position.x = position[0]\n self._current.position.y = position[1]\n\n self._current.orientation.x = orientation[0]\n self._current.orientation.y = orientation[1]\n self._current.orientation.z = orientation[2]\n self._current.orientation.w = orientation[3]\n q = [self._current.orientation.x,\n self._current.orientation.y,\n self._current.orientation.z,\n self._current.orientation.w] # quaternion nonsense\n\n (roll, pitch, yaw) = euler_from_quaternion(q)", "def _drive_player_position(self) -> None:\n player = self._player\n if player:\n assert self.node\n assert player.node\n self.node.connectattr('torso_position', player.node, 'position')", "def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z", "def gazebo_state_callback(self, data):\n\n self.x = 0\n self.y = 0\n self.heading = \"\"\n\n # Identify the index containing the position link state\n index = 0\n namespace = rospy.get_namespace()\n namespace = namespace[1:-1] + \"::base_link\"\n\n try:\n index = data.name.index(namespace)\n except Exception:\n rospy.logdebug(\"Failed to get index. 
Skipping...\")\n return\n\n # Extract the information\n self.x = data.pose[index].position.x\n self.y = data.pose[index].position.y\n heading = self.quaternion_to_yaw(data.pose[index]) * 180 / math.pi\n\n if heading > 0:\n self.heading = heading\n else:\n self.heading = 360 + heading", "def _setup_move(self, position):\n self.log.debug(\"%s.setpoint = %s\", self.name, position)\n self.setpoint.put(position, wait=True)\n if self.actuate is not None:\n self.log.debug(\"%s.actuate = %s\", self.name, self.actuate_value)\n self.actuate.put(self.actuate_value, wait=False)", "def cmd_position(self, n, e, d, heading):\n pass", "def pose_callback(msg):\n\t#Print the values of the x,y,theta of the Turtle:\n rospy.loginfo(\"x: %.11f, y: %.11f, theta: %.11f \", msg.x, msg.y, msg.theta)", "def _local_pose_cb(self, msg):\n self.local_pose = msg", "def crouched_position(mp):\n joints = ['LHipPitch', 'RHipPitch', 'LKneePitch', 'RKneePitch']\n ankles = ['LAnklePitch', 'RAnklePitch']\n\n joint_angles = [-0.6074221134185791,\n -0.4356980323791504,\n 1.6413381099700928,\n 1.5739259719848633]\n\n ankle_angles = [-0.9403839111328125, -1.0461461544036865]\n\n # actuation\n mp.setAngles(joints, joint_angles, 0.1)\n time.sleep(0.420)\n mp.setAngles(ankles, ankle_angles, 0.1)", "def __init__(self):\n self.position = 0", "def update(self, pos):\n\t\tpass", "def _setup_move(self, position):\n\n def done_moving(**kwargs):\n self.log.debug(\"%s async motion done\", self.name)\n self._done_moving(success=True)\n\n if self.done is None:\n # No done signal, so we rely on put completion\n moving_val = 1 - self.done_value\n self._move_changed(value=moving_val)\n\n self.log.debug(\"%s.setpoint = %s\", self.name, position)\n\n if self.actuate is not None:\n self.setpoint.put(position, wait=True)\n\n self.log.debug(\"%s.actuate = %s\", self.name, self.actuate_value)\n self.actuate.put(self.actuate_value, wait=False, callback=done_moving)\n else:\n self.setpoint.put(position, wait=False, callback=done_moving)", "def set_position(self, az_pos, el_pos):\n raise NotImplementedError()", "def AeroMove(self, pos):\r\n\r\n pass", "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=900)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n self.arm_motor.stop(stop_action=\"brake\")\n ev3.Sound.beep()\n self.arm_motor.run_to_rel_pos(\n speed_sp=900, position_sp=-5100)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n print('motor is no longer running')\n ev3.Sound.beep()\n self.arm_motor.position = 0", "def init_position(self):\n if self.invert_init_angle is False:\n self.theta_i_top = -self.theta_s_top\n self.theta_i_bot = -self.theta_s_bot\n else:\n self.theta_i_top = self.theta_s_top\n self.theta_i_bot = self.theta_s_bot\n\n self.move_mid_block(theta=self.theta_i_bot)\n self.move_top_block(theta=self.theta_i_top)\n\n # Variables used to motion\n self.x_offset = self.block_top.center.x\n self.d_top = np.sin(self.theta_s_top) * self.bars_top.length * 2\n self.d_bot = np.sin(self.theta_s_bot) * self.bars_bot.length * 2", "def __callback(self, msg) -> Dict:\n self.__position = Dict(\n {\n \"x\": msg.pose.pose.position.x,\n \"y\": msg.pose.pose.position.y,\n \"z\": msg.pose.pose.position.z,\n }\n )\n self.__quaternion_orientation = Dict(\n {\n \"x\": msg.pose.pose.orientation.x,\n \"y\": msg.pose.pose.orientation.y,\n \"z\": msg.pose.pose.orientation.z,\n \"w\": msg.pose.pose.orientation.w,\n }\n )", "def callback_pose(data):\n x = data.pose.pose.position.x\n y = data.pose.pose.position.y\n roll, pitch, yaw = 
euler_from_quaternion([data.pose.pose.orientation.x,\n data.pose.pose.orientation.y,\n data.pose.pose.orientation.z,\n data.pose.pose.orientation.w])", "def target_position(self, time):\n\n x_pos = self.r*sin(self.w*time)+self.ar_tag_pos[0]\n y_pos = self.r*cos(self.w*time)+self.ar_tag_pos[1]\n z_pos = self.ar_tag_pos[2]\n # print(x_pos,y_pos)\n # raise NotImplementedError\n return np.array([x_pos,y_pos,z_pos])", "def mousePositionRaw(self):", "def mousePositionRaw(self):", "def cmd_vel_callback(self, msg):\n with self._cmd_vel_lock:\n self._x_linear_cmd = msg.linear.x\n self._z_angular_cmd = msg.angular.z\n self._last_cmd_vel_time = rospy.get_rostime()", "def go_to_position(self, position):\n raise NotImplementedError", "def process_IN_MOVE_SELF(self, event):", "def processOdom(self, msg):\n self.x = msg.pose.pose.position.x\n self.y = msg.pose.pose.position.y\n self.z = 180 * (msg.pose.pose.orientation.z % 2)\n print(self.z)", "def arm(self):\n pass", "def updatePosition(self):\n\n #For this update, a time-step of 1 is assumed ->Change Code if not true\n self.position = [self.position[0] + self.velocity[0], self.position[1]+self.velocity[1]]", "def position(self, position):\n self.move_to(position)", "def positioning(self):\n pass", "def update_pos(self):\n s = self\n s.rpos = s.rects[0].inf\n s.pos = s.physics.scl_coord_res(s.rpos)", "def setPos(self,pos,c='cartesian'):\n\t\t#self.velocities={'angMachine':11.8*2*pi/60., 'angCranes': 15*pi/180, 'radial': 1} #radians/sec and m/s\n\t\ttraveltime=0\n\t\tif pos != self.pos:\n\t\t\tself.lastPos=self.pos\n\t\t\tif c=='cartesian':\n\t\t\t\tself.pSpots.append(pos)\n\t\t\t\tself.pos=pos\n\t\t\t\tself.posCyl=self.m.getCylindrical(pos)\n\t\t\telif c=='cylindrical':\n\t\t\t\tself.posCyl=pos\n\t\t\t\tself.pos=self.m.getCartesian(pos)\n\t\t\t\tself.pSpots.append(self.pos)\n\t\t\telse:\n\t\t\t\traise Exception(\"ERROR: setPos only accepts cartesian and cylindrical coordinates %s\"%c)\n\t\t\tif self.otherDevice is not None:\n\t\t\t\t#get angles in radians:\n\t\t\t\tif self.mountPoint=='left':\n\t\t\t\t\t[betaPr, alphaPr, thPr, r1Pr, r2Pr]=self.m.getAngles(self.pos, self.otherDevice.pos, optimize=True)\n\t\t\t\t\t[beta, alpha, th, r1,r2]=self.m.getAngles(self.lastPos, self.otherDevice.pos, optimize=True)\n\t\t\t\telse:\n\t\t\t\t\t[alpha, beta, th, r1, r2]=self.m.getAngles(self.otherDevice.pos,self.lastPos, optimize=True)\n\t\t\t\t\t[alphaPr, betaPr, thPr, r1Pr, r2Pr]=self.m.getAngles(self.otherDevice.pos,self.pos, optimize=True)\n\t\t\t\tself.m.leftAngleMoni.observe(betaPr*360./(2*pi), self.sim.now())\n\t\t\t\tself.m.rightAngleMoni.observe(alphaPr*360./(2*pi), self.sim.now())\n\t\t\t\ttraveltime+=self.m.timeConstants['maincrane']+abs(thPr-th)/float(self.m.velocities['angMachine'])\n\t\t\t\t#so, maincrane has moved.. time for the smaller cranes. 
Move them one by one.\n\t\t\t\tfor arg in [(abs(alpha-alphaPr), abs(r1-r1Pr)), (abs(beta-betaPr), abs(r2-r2Pr))]:\n\t\t\t\t\ttime=self.m.timeConstants['subcrane']+max(arg[0]/float(self.m.velocities['angCranes']), arg[1]/float(self.m.velocities['radial']))\n\t\t\t\t\ttraveltime+=time\n\t\t\telse: #1a\n\t\t\t\toldCyl=self.m.getCylindrical(self.lastPos)\n\t\t\t\tdTh=abs(oldCyl[1]-self.posCyl[1])\n\t\t\t\tdr=abs(oldCyl[0]-self.posCyl[0])\n\t\t\t\ttraveltime+=self.m.timeConstants['maincrane']+max(dTh/self.m.velocities['angMachine'], dr/self.m.velocities['radial'])\n\t\t\tself.lastPos=self.pos #the way it should be..\n\t\t\tself.moveEvent.signal() #tell the other head that a movement has occured.\n\t\tself.timeConsumption['crane movement']+=traveltime\n\t\treturn traveltime", "def _init(self, position):\n\t\tself._position = position", "def _global_pose_cb(self, msg):\n self.global_pose = msg", "def __init__(self, pos):\r\n self.pos = pos", "def target_position(self, time):\n # get joint positions and use fk to get end effector position?\n # ar_tag from topic\n\n cur_pos = self.target_velocity(time)*time + self.start_pos\n\n self.points_generated.append(cur_pos)\n #print(self.start_pos)\n # print(cur_pos)\n return cur_pos", "def _target_global_pose_cb(self, msg):\n self.target_global_pose.append(msg)\n self.target_global_pose = self.target_global_pose[1:]", "def waypoint_callback(self,msg):\n self.waypoint_loc = msg.data", "def poseCallback(self, msg):\n\n state = self.rcs.getState()\n state[0] = msg.pose.position.x\n state[1] = msg.pose.position.y\n quat = np.array((msg.pose.orientation.x,\n msg.pose.orientation.y,\n msg.pose.orientation.z,\n msg.pose.orientation.w))\n # yaw\n (_,_,yaw) = euler_from_quaternion(quat)\n state[2] = yaw\n\n self.rcs.setState(state)\n self.rcs.runScan()", "def update_position(self, canvas):\n if self.x <= 0:\n if self.direction == \"SW\":\n self.direction = \"SE\"\n if self.direction == \"W\":\n self.direction = \"E\"\n if self.direction == \"NW\":\n self.direction = \"NE\"\n if self.x >= canvas.width:\n if self.direction == \"SE\":\n self.direction = \"SW\"\n if self.direction == \"E\":\n self.direction = \"W\"\n if self.direction == \"NE\":\n self.direction = \"NW\"\n if self.y <= 0:\n if self.direction == \"NW\":\n self.direction = \"SW\"\n if self.direction == \"N\":\n self.direction = \"S\"\n if self.direction == \"NE\":\n self.direction = \"SE\"\n if self.y >= canvas.height:\n if self.direction == \"SW\":\n self.direction = \"NW\"\n if self.direction == \"S\":\n self.direction = \"N\"\n if self.direction == \"SE\":\n self.direction = \"NE\"\n if self.direction == \"N\":\n self.y -= 1\n if self.direction == \"NE\":\n self.y -= 1\n self.x += 1\n if self.direction == \"E\":\n self.x += 1\n if self.direction == \"SE\":\n self.x += 1\n self.y += 1\n if self.direction == \"S\":\n self.y += 1\n if self.direction == \"SW\":\n self.x -= 1\n self.y += 1\n if self.direction == \"W\":\n self.x -= 1\n if self.direction == \"NW\":\n self.y -= 1\n self.x -= 1", "def append_cursor_pos_callback(self, callbacked, *args, **kwargs):\n pass" ]
[ "0.7052078", "0.70264876", "0.688068", "0.67120135", "0.6546346", "0.65419245", "0.65237623", "0.6485038", "0.64839834", "0.6448002", "0.6438029", "0.64271504", "0.6418365", "0.63853776", "0.63853776", "0.63853776", "0.63853776", "0.63853776", "0.63853776", "0.63853776", "0.63853776", "0.63853776", "0.63853776", "0.63853776", "0.635046", "0.6339998", "0.63322276", "0.6325836", "0.63257086", "0.63185173", "0.63119715", "0.62761825", "0.61945975", "0.61877304", "0.61770236", "0.6176547", "0.6166801", "0.6166801", "0.6124832", "0.61211956", "0.6120441", "0.6113943", "0.6110307", "0.6084848", "0.6081528", "0.6072329", "0.6072329", "0.6072329", "0.6072329", "0.6072329", "0.6072329", "0.6072329", "0.6072329", "0.6072329", "0.6072329", "0.6072329", "0.60622007", "0.6044815", "0.6039078", "0.6036763", "0.60258913", "0.6003245", "0.6002321", "0.59787565", "0.5965927", "0.5965426", "0.596531", "0.59501046", "0.59366196", "0.5929951", "0.59272575", "0.5920662", "0.59093636", "0.590456", "0.58859193", "0.58758503", "0.58718896", "0.5867264", "0.5860913", "0.5846066", "0.5841846", "0.5841846", "0.5835256", "0.5833163", "0.58212996", "0.58189166", "0.5816707", "0.58157235", "0.5814401", "0.5811684", "0.5809756", "0.5800935", "0.5800878", "0.58000207", "0.5798193", "0.5796671", "0.5793089", "0.57908845", "0.5789046", "0.57874995", "0.5786087" ]
0.0
-1
Reset in case it is needed in the future
def reset(self):
        self.position = np.zeros(self.ndegres)
        self.velocity = np.zeros(self.ndegres)
        self.state = np.zeros(2*self.ndegres)
        self.flag = 0
        self.h_ref = np.array([self.ref for _ in range(self.horizon)])
        self.action = np.zeros(self.ACTION_DIM)
        self.h_action = np.zeros(self.ACTION_DIM*self.horizon)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reset(self) -> None:", "def _reset(self) -> None:", "def _reset(self):\n pass", "def reset():", "def reset():", "def reset():", "def reset():\r\n pass", "def reset(self) -> None:", "def reset(self) -> None:", "def reset(self) -> None:", "def _reset(self):", "def reset(self):\n \n pass", "def reset(self):\n ...", "def reset(self):\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self) -> None:\n ...", "def reset(self):\n\t\tpass", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):", "def reset(self):\r\n pass", "def reset(self):\r\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n return", "def reset(self) -> None:\n pass", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def Reset(self):\n pass", "def reset():\n pass", "def reset():\n pass", "def reset(self):\n self.reset_count += 1\n self._init_data()", "def reset(self):\n raise NotImplementedError()", "def reset(self):\n raise NotImplementedError()", "def reset(self):\n raise NotImplementedError()", "def reset(self):\n raise NotImplementedError()", "def reset() -> None:\n ...", "def reset(self) -> None:\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def hard_reset() -> NoReturn:", "def reset(self):\n self._set_init()", "def reset(self):\n self.counter = 0", "def reset(self):\n pass", "def reset(self):\n pass", "def reset (self):\n self.counter = 0", "def reset(self):\n self.__init__()", "def reset(self):\n self.__init__()", "def reset(self):\n self.__init__()", "def reset(self):\n # replace with your code\n pass", "def reset(self):\n self.logger.debug(\"Resetting...\")\n pass", "def reset(self, *args, **kwargs):", "def reset(self, *args, **kwargs):\n ...", "def reset(self):\n self.previous = None\n self.state = None\n self.args = None\n self.context = None", "def reset() -> None:\n\t_flag.clear()", "def reset(self, *args, **kwargs):\n pass", "def reset(self):\n self.accumulation = None", "def reset(self):\n self.accumulation = None", "def reset(self):\n raise NotImplementedError('Abstract method \"reset\" must be '\n 'specialised!')", "def reset(self):\n self.temp_data.clear()", "def reset(self):\n self.clear()", "def reset():\n _runtime.reset()", "def reset(self):\n self._setupObjects()", "def _reset(self):\n self._value = self._default", "def reset(self):\n raise AssertionError(\"Reset function not implemented\")", "def reset(self):\n super(self.__class__, self).reset(self)", "def deinit(self):\n self.reset()", "def reset(self) -> None:\n self[-1].reset()", "def __init__(self):\n self.reset()", "def __init__(self):\r\n\r\n self.reset()", "def reset(self):\n self._idx = 0", "def reset(self):\n self.value = None", "def reset(self):\n self.stats = {}", 
"def __reset__(self):\n\n for i in self.__dict__.keys():\n self.__dict__[i] = None", "def reset(self):\n self.restart()\n self.cycles = 0", "def reset(self):\n return self._reset", "def reset(self):\n return self._reset" ]
[ "0.90287495", "0.90287495", "0.8958957", "0.89266604", "0.89266604", "0.89266604", "0.89041", "0.88952583", "0.88952583", "0.88952583", "0.8890957", "0.8875916", "0.8809804", "0.8809804", "0.88026774", "0.88026774", "0.88026774", "0.88026774", "0.88026774", "0.88026774", "0.88026774", "0.88026774", "0.8802665", "0.87755185", "0.87755185", "0.87755185", "0.87755185", "0.87410957", "0.87410957", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8732048", "0.8706751", "0.8537499", "0.851642", "0.851642", "0.851642", "0.851642", "0.851642", "0.845759", "0.84067416", "0.84067416", "0.8391977", "0.83616316", "0.83616316", "0.83616316", "0.83616316", "0.83499146", "0.82753026", "0.8268022", "0.8219099", "0.81864333", "0.81719065", "0.81702125", "0.81702125", "0.8154273", "0.81445086", "0.81445086", "0.81445086", "0.81148076", "0.80861735", "0.80784374", "0.80480975", "0.8023837", "0.8023226", "0.8018883", "0.793574", "0.793574", "0.79231685", "0.791788", "0.7902255", "0.78657454", "0.78622454", "0.78561324", "0.78532064", "0.7850319", "0.7827102", "0.7824638", "0.7814859", "0.7790289", "0.77752656", "0.7764922", "0.77494794", "0.7748525", "0.77390295", "0.7738929", "0.7738929" ]
0.0
-1
Set next handler of the chain
def set_next(self, handler):
        self.next = handler
        return handler
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_handler(self, handler):\n self.next_handler = handler", "def _handler_changed(self, handler):\n if self.next is not None:\n self.next.handler = handler", "def setNext(self, next):\n\t\t\tself.next = next", "def next(self, next):\n\n self._next = next", "def next(self, next):\n\n self._next = next", "def chain(self, chain):\n\n self._chain = chain", "def _wrapped_handler_ref_changed(self, wrapped_handler_ref):\n if self.next is not None:\n self.next.wrapped_handler_ref = wrapped_handler_ref", "def send_next(self):\n event = next(self)\n self.send(event)\n return event", "def set_next(self, frame):\n self._set_stopinfo(frame, None)", "def set_next(self, next_logger: Logger):\n self.next = next_logger\n return self.next", "def set_next(self, next: Callable[[UserMessage], None], is_output):\n\n self.next = next\n self.is_output = is_output", "def add_handler(self, path, handler):\n if path: # guard against Null path, we assume handler could be Null\n path_list = self.split_path(path)\n self.trie.insert(step_list=path_list, handler=handler)", "def next(self, event):\n self.result = 1", "def set_next(self, new_next):\n self.next = new_next", "def next(self) -> Optional[Chainable]:\n return None", "def _set_link(self, value, handler):\n self._mapping[value] = handler", "def __next__(self):\n\n pass", "def add_handler(self, handler):\n pass", "def next( self ):\n next(self)", "def set_handler(self, handler):\n self._handler = handler", "def setNext(self, nextNode):\n self.__next = nextNode", "def set_next(self, node):\r\n self.__next = node", "def set_next(self, next_layer):\n self.next_layer = next_layer", "def __next__(self):\n pass", "def __next__(self):\n pass", "def __next__(self):\n pass", "def __next__(self):\n pass", "def set_next(self, node):\n self.__next = node", "def next(action, value, error_handle, skip_invoked=True):\n error_handle['action'] = 'NEXT'\n if skip_invoked:\n print_info(\"failure action= next\")\n return error_handle", "def set_next(node, value):\n node['next'] = value", "def register_handler(self, handler):\r\n self.handler = handler", "def next(self):\r\n pass", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\n pass", "def next(self):\n pass", "def apply_next_event(self):\n self.apply_event(self.next_event())\n self._current_event_ndx += 1", "def insert_callback(self, chain, value):", "def goto_next_level(self, *args):\n self.manager.current = self.manager.next()\n self.reset()", "def add_handler(self, handler, backtrack = False):\n\n # Add Handler\n self._handlers.append(handler)\n logger.debug(\"%s: handler %s added.\" % \\\n (self.__class__.__name__, handler.__name__))\n \n # Backtrack\n if backtrack:\n for message in self.get_waiting(): handler(message)\n logger.debug(\"%s: handler %s backtracked.\" % \\\n (self.__class__.__name__, handler.__name__))", "def set_added_handler(self, handler):\n self._added_handler = handler", "def add_handler(self, handler):\n self.register(abcs.AHandler, handler, handler)", "def create_callback(self, chain):", "def set_next_state(self, state):\n self.next_state = state", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably 
restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def dispatch_next(self):\r\n self._dispatch_amount += 1\r\n while self._dispatch_amount:\r\n try:\r\n # XXX: possible race condition shuffling the order of\r\n # dispatches in the next two lines.\r\n func, args, kwargs = next(self._original_iterable)\r\n self.dispatch(func, args, kwargs)\r\n self._dispatch_amount -= 1\r\n except ValueError:\r\n \"\"\" Race condition in accessing a generator, we skip,\r\n the dispatch will be done later.\r\n \"\"\"\r\n except StopIteration:\r\n self._iterating = False\r\n self._original_iterable = None\r\n return", "def __rshift__(self, next: 'IO[TResult]') -> 'IO[TResult]':\n return self.bind(lambda _: next)", "def __rshift__(self, next):\n @AccessFilter\n def f(*args, **kwargs):\n first = self(*args, **kwargs)\n if first is None:\n return None\n return next(first)(*args, **kwargs)\n return f", "def set_event_handler(self, handler):\r\n if self._handler:\r\n handler.pre_listeners = self._handler.pre_listeners\r\n handler.post_listeners = self._handler.post_listeners\r\n self._handler = handler\r\n self._handler.c = self\r\n self._handleFn = handler._handle1", "def next():", "def next():", "def next(self):\n self.jumpahead(1)", "def __call__(self):\r\n return self.next()", "def tb_set_next(tb, next):\n if not (isinstance(tb, TracebackType) and\n (next is None or isinstance(next, TracebackType))):\n raise TypeError('tb_set_next arguments must be traceback objects')\n obj = _Traceback.from_address(id(tb))\n if tb.tb_next is not None:\n old = _Traceback.from_address(id(tb.tb_next))\n old.ob_refcnt -= 1\n if next is None:\n obj.tb_next = ctypes.POINTER(_Traceback)()\n else:\n next = _Traceback.from_address(id(next))\n next.ob_refcnt += 1\n obj.tb_next = ctypes.pointer(next)", "def insert_callback(self, chain, value):\n for reactor in self._reactors:\n reactor.insert_callback(chain, value)", "def set_next(self, key: str):\n if not self.next:\n self._next = [key]\n elif key not in self.next:\n self._next.append(key)\n return self", "def setNextIface(self): \n self.nextIface+=1", "def next(self, _event):\n self.set_val(self.val + 1)", "def set_handler(key):\n def wrapper(func):\n func.set_key = key\n return func\n\n return wrapper", "def add_handler(self, path, handler) -> None:\n if self.__test_path(path) and self.__test_path(handler):\n path_parts = self.__split_path(path) # Splits parts into constituent components\n self.route_trie.insert(path_parts, handler) # Passes parts on for addition to the trie", "def make_new_handler(self, *args, **kwargs):", "def __next__(self):\n\t\treturn next()", "def next(self):\n self._select_interface(self._rc_next, self._http_next)", "def set_step(self):\n super(Pdb, 
self).set_step()\n if hasattr(self, \"_set_trace_use_next\"):\n del self._set_trace_use_next\n self.set_next(self._via_set_trace_frame)", "def next_remediation(self, next_remediation):\n\n self._next_remediation = next_remediation", "def default(self, handler: Handler):\n if asyncio.iscoroutinefunction(handler):\n self._default_handler = AsyncCommandProxy(handler, self.parser)\n else:\n self._default_handler = CommandProxy(handler, self.parser)\n return handler", "def setNext(self, next_node):\n self.__nextListNode = next_node", "def _chain(_):\n if f.done():\n return\n if recvd.exception():\n f.set_exception(recvd.exception())\n else:\n buf = recvd.result()\n try:\n loaded = load(buf)\n except Exception as e:\n f.set_exception(e)\n else:\n f.set_result(loaded)", "def next(self):\n return type(self).__next__(self)", "def GetNext(self, *args, **kwargs):\n pass", "def setup_middlewares(self):\n is_output = self._get_connector_type() == self.OUTPUT_CONNECTOR_TYPE\n \n last = None\n for middleware in self.get_middlewares():\n \n if last is not None:\n last.set_next(middleware.compute, is_output)\n\n last = middleware\n self.used_middlewares.append(middleware)\n\n\n defualt_path_middleware = RasaDefaultPathMiddleware(self._get_default_path())\n\n if len(self.used_middlewares) > 0:\n last.set_next(defualt_path_middleware, is_output)\n else:\n self.used_middlewares = [defualt_path_middleware, ]\n \n self.middleware_is_ready = True", "def setDataRequestHandler(self, handler):\n self.dataRequestHandler = handler", "def doNext(self, action):\n if self.handleApplyOnNext:\n return self.doHandleApply(action)\n return True", "def next_step(self):\n self.proceed()\n self.execute_current()", "def __init__(self, handler):\n self.__handler = handler", "def _dispatch_changed(self, dispatch):\n if self.next is not None:\n self.next.dispatch = dispatch", "def _set(self, value):\n value = self._call_func(value)\n self._set_value(value)\n if value is undefined:\n return # no need to update\n for signal in self._downstream_reconnect[:]: # list may be modified\n signal.connect(False)\n for signal in self._downstream:\n signal._set_status(1, self) # do not set status of *this* signal!", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()" ]
[ "0.7661297", "0.6707803", "0.6030326", "0.59693336", "0.59693336", "0.5968152", "0.5833051", "0.5818501", "0.57607543", "0.57461786", "0.5692767", "0.566452", "0.5590942", "0.5582867", "0.557292", "0.55651385", "0.55508965", "0.5547468", "0.55370283", "0.55218494", "0.5515645", "0.5487466", "0.5444706", "0.5423415", "0.5423415", "0.5423415", "0.5423415", "0.54202133", "0.5407615", "0.53480464", "0.52994585", "0.5294839", "0.527588", "0.527588", "0.527588", "0.527588", "0.5244853", "0.52358335", "0.52352095", "0.5233661", "0.52031344", "0.51893115", "0.51873064", "0.5181354", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5169482", "0.5124582", "0.5118039", "0.511725", "0.51142263", "0.5106948", "0.5106948", "0.51065046", "0.5104728", "0.51012504", "0.509962", "0.5083297", "0.50777406", "0.5073504", "0.5071577", "0.50704885", "0.50663716", "0.50529236", "0.5046778", "0.5033982", "0.5026088", "0.5021028", "0.50209755", "0.5010569", "0.5006947", "0.5000958", "0.5000938", "0.49819183", "0.4971256", "0.4964744", "0.4963251", "0.49582434", "0.49575034", "0.4955636", "0.4955636", "0.4955636", "0.4955636", "0.4955636", "0.4955636", "0.4955636" ]
0.80296415
0
This method must be implemented by child
def handle(self, context: Context):
        raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self):\r\n raise NotImplementedError('override me')", "def __call__(self):\n raise NotImplementedError", "def override(self):\n return None", "def __call__(self):\n raise NotImplementedError()", "def __call__(self):\n\t\treturn", "def __call__( self ):\n pass", "def extension (self):\n assert False, \"To be implemented by child\"", "def __call__(self):\n pass", "def __call__(self):\n pass", "def __call__(self) -> None:", "def support(self):", "def base(self):\n raise NotImplementedError()", "def handle(self):", "def intuit(self):\n raise NotImplemented()", "def handle(self):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def use(self):", "def run(self):\n raise NotImplementedError # implement in subclass", "def __call__(self, **kwargs):\n raise NotImplementedError", "def run(self):\n raise Exception('derived class should redefine this function')", "def __call__(self):\r\n raise self", "def __call__(self):\r\n raise self", "def primary(self):\n ...", "def process(self):\n raise NotImplementedError('Method must be implemented by subclass.')", "def regular(self):", "def override(self,scope):", "def act(self):\n raise NotImplementedError", "def __call__(self) -> dict:\n\t\tpass", "def __init__(self):\n\t\tsuper().__init__()", "def __init__(self):\n\t\tsuper().__init__()", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def render(self):", "def __post_init__(self):\n pass", "def think(self):\n pass", "def _base(self):\n pass", "def offering(self):\r\n raise NotImplementedError()", "def child_overriden(self):\n raise NotImplementedError(\n \"{} does not have implemented `child_overriden`\".format(self)\n )", "def perform(self):\n raise TypeError(\"Derived class must implement\")", "def basic(self):\n pass", "def _hook(self):", "def method(self):", "def implement(self):\n\t#@DEBUG remove comments", "def __init__(self):\r\n\t\tpass", "def handle(self) -> None:", "def d(self):\n pass", "def d(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def render(self):\n pass", "def __call__(self):", "def __call__(self):", "def object(self):", "def target(self):", "def render(self):\n raise NotImplementedError", "def bad(self):\n raise NotImplementedError", "def bad(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError()", "def call(self):", "def process(self):", "def process(self):", "def process(self):", "def __int__(self):\n pass", "def __call__(self, *args, **kwargs) -> None:\n raise NotImplementedError()", "def render(self):\n raise NotImplementedError()", "def run(self):\n raise NotImplemented(\"Inheriting classes should implement this\")", "def function(self):\n raise NotImplementedError", "def child(self, v, c):\n # method here", "def __post_init__(self) -> 'None':", "def on(self):\n raise NotImplementedError", "def _proceed(self):\n raise NotImplementedError", "def render(self):\n raise Exception(\"Abstract method\")", "def __init__(self):\n super()", "def 
check(self):\n raise NotImplementedError('Must be implemented by subclass.')", "def run(self):\n raise NotImplementedError(\"Subclass must implement abstract method\")", "def moi(self):\n\n pass", "def processing(self):\n pass", "def _prepare(self):", "def _prepare(self):", "def _init(self):\n raise NotImplementedError", "def test(self):\n raise NotImplementedError", "def process(self):\n raise NotImplementedError", "def c(self):\n pass", "def c(self):\n pass", "def org(self):\r\n raise NotImplementedError()", "def build(self):", "def build(self):", "def build(self):", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass" ]
[ "0.7916674", "0.79126203", "0.7673766", "0.7651007", "0.7559392", "0.74367213", "0.73910874", "0.7356369", "0.7356369", "0.7195709", "0.7069751", "0.7009408", "0.68268293", "0.6821625", "0.68035114", "0.67765045", "0.67765045", "0.67765045", "0.6763297", "0.6763297", "0.67564404", "0.6747451", "0.67318493", "0.6715212", "0.6713269", "0.6713269", "0.6701951", "0.66770345", "0.6645348", "0.6642324", "0.6641296", "0.6582257", "0.6580308", "0.6580308", "0.6555676", "0.6555676", "0.6555676", "0.6555676", "0.6551259", "0.65341717", "0.6519882", "0.6506764", "0.65067387", "0.65000665", "0.6497084", "0.6494126", "0.64772826", "0.6457752", "0.6443246", "0.6429665", "0.6428721", "0.6417125", "0.6417125", "0.641245", "0.641245", "0.641245", "0.641245", "0.641245", "0.641245", "0.64092183", "0.64092183", "0.6401463", "0.6400537", "0.63875484", "0.6386841", "0.6386841", "0.63868016", "0.6378964", "0.6375205", "0.6375205", "0.6375205", "0.6373126", "0.6364478", "0.63538766", "0.6352379", "0.6329975", "0.6324742", "0.6321464", "0.6315493", "0.6306909", "0.63057256", "0.6300862", "0.6299915", "0.6297182", "0.6295506", "0.62949866", "0.62868", "0.62868", "0.62803584", "0.6274145", "0.6273756", "0.62722963", "0.62722963", "0.6264816", "0.6263749", "0.6263749", "0.6263749", "0.62633824", "0.62633824", "0.62633824", "0.62633824" ]
0.0
-1
Check whether a given seed is still unexplored.
def check_seed(self, seed):
        out = self.complement(seed)
        return self.solver.solve([(i + 1) for i in seed] + [-(i + 1) for i in out])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def is_seed_valid(seed):\n if seed == \"0\":\n return True\n\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True", "def _check_random_state(seed):\n return check_random_state(seed)", "def __isTileInExplored(self, tile):\n for eachTile in self.explored:\n if eachTile.coordinate == tile.coordinate:\n return True\n return False", "def confused(self, rand):\n return rand > 0", "def red_has_won(self):\n return not any([self.squares[p].is_red() for p in self.squares \\\n if self.squares[p]])", "def is_exploring(self, step):\n return np.random.rand() < self._epsilon(step)", "def is_indeed(self) -> bool:\n return self.mukluk > 5", "def has_uniform_seed_margin(self, seed_margin=20.0):\n margin = np.ceil(np.reciprocal(np.array(CONFIG.volume.resolution),\n dtype=np.float64) * seed_margin).astype(np.int64)\n\n mask_target = self.label_mask\n # If data is unlabeled, can not test so always succeed.\n if mask_target is None:\n return True\n # Seed location in the mask accounting for offset of label from image.\n ctr = self.seed - (np.asarray(self.image.shape) - np.asarray(mask_target.shape)) // 2\n seed_fov = (ctr - margin, ctr + margin + 1)\n seed_region = mask_target[seed_fov[0][0]:seed_fov[1][0],\n seed_fov[0][1]:seed_fov[1][1],\n seed_fov[0][2]:seed_fov[1][2]]\n return np.all(seed_region)", "def check_win(self, color):\n if dijkstra(self, color) == 0:\n return True\n else:\n return False", "def is_destroyed(self) -> bool:\n return self._coords == self.damaged_cells", "def testKingOnly(board):\n return bin(board.friends[board.color]).count(\"1\") == 1", "def checkEndOfGame(self, colorIndex):\n checkColor = self.grid.REPRESENTATION[colorIndex]\n otherColor = self.grid.REPRESENTATION[1-colorIndex]\n emptyColor = self.grid.REPRESENTATION[2]\n for i in range(1, self.grid.width+1):\n for j in range(1, self.grid.height+1):\n if self.grid[i, j] != checkColor:\n continue\n if (i > 2) and (self.grid[i-1, j] == otherColor) and (self.grid[i-2, j] == emptyColor):\n return False\n if (i < self.grid.width-1) and (self.grid[i+1, j] == otherColor) and (self.grid[i+2, j] == emptyColor):\n return False\n if (j > 2) and (self.grid[i, j-1] == otherColor) and (self.grid[i, j-2] == emptyColor):\n return False\n if (j < self.grid.height-1) and (self.grid[i, j+1] == otherColor) and (self.grid[i, j+2] == emptyColor):\n return False\n return True", "def is_degenerate(ten):\n contains_nan = torch.isnan(ten).any()\n contains_inf = (ten == float(\"inf\")).any()\n\n return contains_nan or contains_inf", "def is_unoccupied(self) -> bool:\n return self.piece == Piece() # Piece() creates an \"empty-piece\"", "def EyeColorTest(str):\n\n\tvalidcolors = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\treturn str in validcolors", "def check_dead(cart):\n id = cart_to_loc(cart)\n return voxel_data[id] == 0", "def doesClear(self): \r\n return random.random() < self.clearProb", "def cell_is_usable(cell):\n\tmaxcolors = tile_size[0] * tile_size[1]\n\tcolors = cell.getcolors(maxcolors)\n\t\n\tis_usable = False\n\t\n\tfor color in colors:\t\n\t\tif len(color[1]) == 3:\n\t\t\tif sum(color[1]) == 765 and color[0] < maxcolors/2:\n\t\t\t\t# less than a half are white\n\t\t\t\tis_usable = True\n\t\telse:\n\t\t\tif color[1][3] == 255 and color[0] >= 0:\n\t\t\t\tis_usable = True\n\t\t\t\tbreak\t\n\treturn 
is_usable", "def check_notch(self):\n\n return self.pins[0] == self.notch", "def is_elevation(self):\n return not self._is_depth", "def any_neighbor_burning(self):\n neighbors = self.world.get_four_neighbors(self, Patch.null)\n states = [patch.state for patch in neighbors]\n return \"orange\" in states", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def is_destroyed(self):\n if self._power == 0:\n return True", "def is_destroyed(self):\n if self._power == 0:\n return True\n return False", "def check_game_over(self):\n for piece in self.pieces:\n if not piece.destroyed:\n return False\n print(\"Signal.END\")\n return True", "def is_valid_eye_color(eye_color: str) -> str:\n return eye_color in [\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"]", "def is_exhausted(self):\n return random.random() < 0.5", "def has_exited(self):\n agents = self.board[self.agent_locs_idx]\n return agents & (CellTypes.agent | CellTypes.exit) == CellTypes.exit", "def discarded(self) -> bool:\n return (\n len(self.cards) == 13 - self.game.board.purple.space - self.discard_amount\n )", "def scratch(self) -> bool:\n hcell = self._get_hcell2()\n return \"scratch\" in hcell", "def __bool__(self):\n return not(self.outcome != 0 or self.filled)", "def check_game_end(self):\n\n return any([i != 0 for i in self.board[0]])", "def termialTest(state):\n if state.isWin() or state.isLose():\n return True\n return False", "def is_solved(self):\n colors = ['green', 'blue', 'red', 'orange', 'white', 'yellow']\n for row in range(3):\n for column in range(3):\n if self.front[row][column] != colors[0]:\n return False\n for row in range(3):\n for column in range(3):\n if self.back[row][column] != colors[1]:\n return False\n for row in range(3):\n for column in range(3):\n if self.right[row][column] != colors[2]:\n return False\n for row in range(3):\n for column in range(3):\n if self.left[row][column] != colors[3]:\n return False\n for row in range(3):\n for column in range(3):\n if self.up[row][column] != colors[4]:\n return False\n for row in range(3):\n for column in range(3):\n if self.down[row][column] != colors[5]:\n return False\n return True", "def check_seed():\n np.random.seed(1000)\n standard = [\n {0: -3.0, 1: -5.0, 'index': 0},\n {0: -6.0, 1: -8.0, 'index': 1},\n {0: 5.0, 1: -1.0, 'index': 2},\n {0: 1.0, 1: -7.0, 'index': 3},\n {0: -2.0, 1: -3.0, 'index': 4},\n {0: 7.0, 1: 3.0, 'index': 5},\n {0: -4.0, 1: -2.0, 'index': 6},\n {0: 2.0, 1: 6.0, 'index': 7}\n ]\n\n this_machine = create_points(8)\n\n flag = True\n for i in range(8) :\n flag &= this_machine[i][0] == standard[i][0] \n flag &= this_machine[i][1] == standard[i][1] \n flag &= this_machine[i][\"index\"] == i\n \n if not flag :\n print(\"\"\"\n The Python installation on this machine is odd: it appears to\n use a non-standard random number generator -- run \n this script on the machines in the Otter lab instead.\n If that fails too, send an email to ag0015@surrey.ac.uk.\n \"\"\")\n print (\"You got these test points:\", this_machine)\n print (\"You should have got:\", standard)\n exit(-1)\n else :\n print (\"Check passed\")", "def __suitIsLured(self, suitId, prevRound=0):\n inList = self.currentlyLuredSuits.has_key(suitId)\n if prevRound:\n # only return true if the suit has been lured for at least\n # one entire round\n return inList and self.currentlyLuredSuits[suitId][0] != -1\n return inList", "def is_not_tilted(self, channel=None):\n return not self.get_state(channel)", "def flush_udacity(hand):\n suits = [s for r,s 
in hand]\n return len(set(suits)) == 1", "def is_game_over(self):\n\n if len(self.next_pieces) == 0:\n return True", "def is_monochromatic(self):\n return equal(s.color for s in self.iter_states())", "def check_random_state(seed):\n if seed is None:\n return np.random.mtrand._rand\n\n elif isinstance(seed, int):\n return np.random.RandomState(seed)\n\n elif isinstance(seed, np.random.RandomState):\n return seed\n\n raise ValueError(\"Seed should be None, int or np.random.RandomState\")", "def still_in_hand(self):\n return len(self.hand.cards)!=0", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def is_repeat(self, state):\n\t\tif not self.state.repeats():\n\t\t\treturn False\n\t\treturn state.repeated_rep() in self.visitedStates", "def validPlayerColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW):\n return False\n else:\n return True", "def validColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW, EMPTY):\n return False\n else:\n return True", "def is_there_life(pixel):\n\treturn pixel[0] == 0", "def is_enemy(self, x, y, mycolor):\n piece = self.get_piece(x, y)\n if piece:\n return piece.color != mycolor\n return False", "def test_do_not_find_unconnected_element(self):\r\n \r\n # Adjacency List of graph G\r\n G = {}\r\n G[0] = [1, 2]\r\n G[1] = [0, 3]\r\n G[2] = [0, 3, 4]\r\n G[3] = [1, 2, 4, 5]\r\n G[4] = [2, 3, 5]\r\n G[5] = [4, 5]\r\n G[6] = [7]\r\n G[7] = [6]\r\n\r\n # Start node\r\n s = 0\r\n \r\n exploredList = BFS.BFS(G, s)\r\n expExploredList = {0:1, 1:1, 2:1, 3:1, 4:1, 5:1, 6:0, 7:0} # i.e. 
6 & 7\r\n # unexplored\r\n\r\n self.assertEqual(expExploredList, exploredList)", "def isGameOver(self):\n for i in range(self.rows):\n for j in range(self.columns):\n if self.grid[i][j].face == 'down':\n return False\n #if here then all cards must be face up\n return True", "def is_finished(self)-> bool:\n for line in self.grid:\n for pawn in line:\n if pawn.color is None:\n return False\n return True", "def terminal_test(gameState):\n return len(gameState.get_legal_moves()) == 0", "def at_exit(piece, colour):\n return ((colour == 'red' and piece.q == BOARDDIM) or\n (colour == 'green' and piece.r == BOARDDIM) or\n (colour == 'blue' and piece.q + piece.r == -BOARDDIM))", "def verify_winner(self):\r\n return self.count_pegs() == 1", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True", "def _isred(self, node):\n \n if node is None:\n return False\n else:\n return node.color == Color.RED", "def validate_eye_color(passport: map) -> bool:\n if passport.get('ecl'):\n return passport['ecl'] in valid_eye_colors\n\n return False", "def stalemate(self):\n last_piece = self.pieces.moveHistory[-1][0]\n last_piece_color = self.pieces.piece_color(last_piece)\n if last_piece_color == \"black\":\n if len(self.whiteMoves) == 0:\n return True\n else:\n if len(self.blackMoves) == 0:\n return True\n return False", "def black_has_won(self):\n return not any([self.squares[p].is_black() for p in self.squares \\\n if self.squares[p]])", "def _check_random_state(seed):\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)", "def is_die_held_in_wrong_stage(self, die):\n if type(die) != type(die_class.Die()):\n raise TypeError(\"Expecting Die argument.\")\n if self.game_stage == 1:\n return die.current_value not in die.possible_values[0:2]\n if self.game_stage == 2:\n return die.current_value not in die.possible_values[2:4]\n if self.game_stage == 3:\n return die.current_value not in die.possible_values[4:6]", "def isTileCleaned(self, m, n):\n return self.tiles[n][m] == True", "def in_check(self, colour):\n from pieces import King, Queen, Bishop, Knight, Rook, Pawn\n king_coord = self.white_king_coord if colour is WHITE else self.black_king_coord\n if king_coord is None:\n return False\n for piece_class in King, Queen, Bishop, Knight, Rook, Pawn:\n dummy_piece = piece_class(colour = colour)\n for coord_to in dummy_piece.iter_move_coords(self, king_coord):\n piece = self[coord_to]\n if isinstance(piece, piece_class) and piece.colour != colour:\n return True\n return False", "def isGameOver(self):\n for row in range(0, self.rows):\n for col in range(0, self.cols):\n if self.isMine(row, col) and self.isClicked(row, col):\n return True\n return False", "def not_finish(board: Board) -> bool:\n\n if argv[2] is '0':\n return 0 in flatten(board)\n else:\n return (0 in flatten(board) or\n 0 in flatten(flush_values(board, 2)) or\n 0 in flatten(flush_values(board, 4)))", "def test_nonsense(self):\n with self.assertRaises(ValueError):\n ESN(N_in,N_out,random_state=-1)\n\n with self.assertRaises(Exception) as cm:\n ESN(N_in,N_out,random_state=0.5)\n self.assertIn(\"Invalid seed\",str(cm.exception))", "def has_enemy_piece(self, piece) -> bool:\r\n if self.has_piece():\r\n if piece.get_color() != 
self.get_piece().get_color():\r\n return True\r\n \r\n return False", "def is_game_finish(self):\n for row in self.chessboard:\n for val in row:\n if not val.is_correct_state():\n return False\n return True", "def check_random_state(self, seed):\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState instance' % seed)", "def has_flush(self):\n self.suit_hist()\n for val in self.suits.values():\n if val >= 5:\n return True\n return False", "def __bool__(self):\n return len(self._states_) > 0", "def check_random_state(seed):\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (int, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)", "def is_black(self):\n return self.__e", "def check_if_solved(self):\n for cell in self.board.values():\n if not cell.value:\n return False\n return True", "def _is_fail(self):\n failed = False\n for obj in self.world_state.objects:\n failed = failed or obj.lost\n return failed", "def check_random_state(seed):\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)", "def check_random_state(seed):\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)", "def check_random_state(seed):\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)", "def step_empty(self):\n if random.random() < self.world.p:\n self.set_state(\"green\")", "def death_test(snake, board):\r\n if snake.snake[-1][0] >= len(board.board[0]) or snake.snake[-1][1] >= len(board.board) or snake.snake[-1][0] < 0 or snake.snake[-1][1] < 0: \r\n return True\r\n else:\r\n for i in snake.snake[:-1]:\r\n if i[:-1] == snake.snake[-1][:-1]: \r\n return True\r\n return False", "def random_test(self, source):\r\n ret = 1\r\n for seed in range(1, 40):\r\n if source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x)**2+10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}) != \\\r\n source.run(temp_params={\"fitness_function\": (lambda x: -np.sum(x) ** 2 + 10),\r\n \"population_size\": 10,\r\n \"time_constraint\": 2,\r\n \"axes\": [(0, 5)],\r\n \"seed\": seed}):\r\n ret = 0\r\n if ret == 0:\r\n if self.verbosity > 0:\r\n print(\"ERROR: Random seed non functional, results cannot be replicated.\")\r\n return 0\r\n else:\r\n if self.verbosity > 1:\r\n print(\"Random seed functional, results replicable if a seed is used.\")\r\n return 1", "def 
isOpen(self):\n return self.analyzed_digest != {}", "def check_lost (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n if grid[0][0]==grid[0][1] or grid[0][0]==grid[1][0]:\r\n return False \r\n if grid[0][3]==grid[0][2] or grid[0][3]==grid[1][3]:\r\n return False \r\n if grid[3][0]==grid[2][0] or grid[3][0]==grid[3][1]:\r\n return False\r\n if grid[3][3]==grid[2][3] or grid[3][3]==grid[3][2]:\r\n return False \r\n if grid[0][1]==grid[0][2] or grid[0][1]==grid[1][1]:\r\n return False \r\n if grid[0][2]==grid[1][2]:\r\n return False \r\n if grid[1][1]==grid[2][1] or grid[1][1]==grid[1][2] or grid[1][1]==grid[1][0]:\r\n return False\r\n if grid[2][1]==grid[2][0] or grid[2][1]==grid[2][2] or grid[2][1]==grid[3][1]:\r\n return False \r\n if grid[1][0]==grid[2][0]:\r\n return False\r\n if grid[1][2]==grid[1][3] or grid[1][2]==grid[2][2]:\r\n return False\r\n if grid[2][2]==grid[2][3] or grid[2][2]==grid[3][2]:\r\n return False\r\n if grid[3][1]==grid[3][2]:\r\n return False\r\n else:\r\n return True", "def is_not_final(cls, state):\n return state in cls._not_final_states", "def is_miss_deal(hand: list, mighty: Card) -> bool:\n point_card_count = 0\n for card in hand:\n if card.is_pointcard() and card != mighty:\n point_card_count += 1\n\n if point_card_count <= 1:\n return True\n else:\n return False", "def _check_random_state(self, seed: Union[None, int]) -> None:\n\n if seed is None:\n return np.random.mtrand._rand\n\n elif type(seed) == int:\n return np.random.RandomState(seed)\n\n raise ValueError(f'Seed {seed} must be None or an integer.')", "def damaged(self) -> bool:\n return len(self._damaged_cells) > 0", "def check_tie(board):\n return 0 not in board[0]", "def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False", "def is_unrolled_out_leaf(self, game):\n return self._plays[game] == 0", "def is_legal(vtx, color):\n for neighbor in vtx[\"adjacent\"]:\n if VERTICES[neighbor][\"color\"] is color:\n return False\n return True", "def check_win(self):\n for pos in self.win_set:\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False", "def game_won(self):\n\n # Makes sure every tile is colored,\n for column in self.board:\n for tile in column:\n if not tile.color:\n return False\n\n # Makes sure each color has a line.\n colors = set()\n for dot in self.dots:\n dot_tile = self.board[dot.x][dot.y]\n colors.add(dot.color)\n for dot in self.dots:\n dot_tile = self.board[dot.x][dot.y]\n # If we've already found a line for this color.\n if dot.color not in colors:\n continue\n # If this dot starts a line and ends at the other dot.\n if dot_tile.next and not dot_tile.line_end().is_dot:\n return False\n elif dot_tile.next:\n colors.remove(dot.color)\n # If colors isn't empty, not all colors have lines.\n return not colors", "def IsSkipped(self):\n state = self.GetState()\n return state.status == TestState.SKIPPED", "def was_used(self):\r\n return self.circ_chosen != 0", "def is_dead(self):\n return self.hearts <= 0", "def check_for_draw(self, current_board):\r\n init_board = [0, 1, 2, 3, 4, 5, 6, 7, 8]\r\n if any(i in current_board for i in init_board):\r\n return False\r\n else:\r\n return True" ]
[ "0.6756834", "0.6756834", "0.6691525", "0.6599388", "0.61974424", "0.6161359", "0.5891919", "0.5860734", "0.58116674", "0.5756278", "0.572533", "0.5714277", "0.57110393", "0.5691943", "0.5688913", "0.56780785", "0.5637771", "0.559989", "0.556628", "0.5554336", "0.55459327", "0.5529324", "0.5527496", "0.5505802", "0.54989314", "0.5488496", "0.5479259", "0.5477626", "0.54714364", "0.5452168", "0.54520905", "0.54430413", "0.54415363", "0.54395074", "0.54294205", "0.5423721", "0.5413849", "0.54134965", "0.54039115", "0.540384", "0.5397803", "0.5378415", "0.5374311", "0.5366198", "0.53594905", "0.53594905", "0.5352495", "0.5351622", "0.5349583", "0.5338675", "0.5324374", "0.53183866", "0.5312241", "0.5309243", "0.5305196", "0.5302393", "0.5289817", "0.5288482", "0.52766687", "0.5273454", "0.5269638", "0.5267135", "0.5264955", "0.52641404", "0.52605397", "0.5259753", "0.52530795", "0.5250448", "0.52446216", "0.52438915", "0.524206", "0.5234837", "0.5234274", "0.52330625", "0.5230319", "0.5229473", "0.5220811", "0.5219876", "0.5217229", "0.5217229", "0.5217229", "0.5209764", "0.5209534", "0.52033854", "0.5201743", "0.5201505", "0.51894903", "0.51879734", "0.5187642", "0.51843154", "0.51833457", "0.51833045", "0.5182355", "0.51768124", "0.51716614", "0.51699215", "0.51648575", "0.51647705", "0.5151485", "0.5149446" ]
0.62459785
4
Look for and return any unexplored point including the given seed. Calling map.find_above(MSS) after map.block_down(MSS) will thus find strict supersets of the MSS, as the MSS itself has been blocked.
def find_above(self, seed):
    superset_exists = self.solver.solve((i + 1) for i in seed)
    if superset_exists:
        return self.get_seed()
    else:
        return None
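For context, a minimal self-contained sketch (not part of the dataset record) of the behavior described by the query above: after block_down(MSS), find_above(MSS) can only return strict supersets of the MSS. Real implementations back the map with a SAT solver; the ToySeedMap class, its brute-force blocking check, and the example sizes below are assumptions made purely for illustration.

# Illustrative toy only: a brute-force stand-in for the CNF-based seed map.
# "Points" are subsets of range(n); block_down marks a seed and all of its
# subsets as explored, and find_above returns any unexplored superset.
from itertools import combinations

class ToySeedMap:
    def __init__(self, n):
        self.n = n
        self.blocked_down = []  # each entry blocks that seed and its subsets

    def block_down(self, seed):
        self.blocked_down.append(frozenset(seed))

    def _unexplored(self, candidate):
        return not any(candidate <= blocker for blocker in self.blocked_down)

    def find_above(self, seed):
        # Return any unexplored point that includes the given seed, or None.
        seed = frozenset(seed)
        rest = [i for i in range(self.n) if i not in seed]
        for size in range(len(rest) + 1):
            for extra in combinations(rest, size):
                candidate = seed | frozenset(extra)
                if self._unexplored(candidate):
                    return set(candidate)
        return None

m = ToySeedMap(3)
mss = {0, 1}
m.block_down(mss)         # the MSS itself (and its subsets) are now blocked
print(m.find_above(mss))  # yields a strict superset such as {0, 1, 2}

Because the MSS was blocked "downward", the size-0 extension (the MSS itself) fails the unexplored check, so the first point returned necessarily adds at least one element, matching the docstring's claim about strict supersets.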
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_seed(self, seed):\n out = self.complement(seed)\n return self.solver.solve([(i + 1) for i in seed] + [-(i + 1) for i in out])", "def choose_pos(self):\n s = self\n\n availablepos = []\n for dblock in s.pjs.dblocks:\n is_available = True\n\n for powerup in s.pjs.powerups:\n if powerup.rects[0].overlap(dblock.rects[0]):\n is_available = False\n break\n\n if is_available:\n availablepos.append(dblock.rpos)\n\n pos = random.randint(0, len(availablepos) - 1)\n s.rpos = availablepos[pos]", "def get_mask_with_stent_likely_positions(data, th, verbose=False):\n \n # NOTE: this pure-Python implementation is little over twice as slow\n # as the Cython implementation, which is a neglectable cost since\n # the other steps in stent segmentation take much longer. By using\n # pure-Python, installation and modification are much easier!\n # It has been tested that this algorithm produces the same results\n # as the Cython version.\n \n # Init mask\n mask = np.zeros_like(data, np.uint8)\n \n # Criterium 1A: voxel must be above th\n # Note that we omit the edges\n #mask[25:-25,25:-25,25:-25] = (data[25:-25,25:-25,25:-25] > th[0]) * 3\n mask[1:-1,1:-1,1:-1] = (data[1:-1,1:-1,1:-1] > th[0]) * 3\n \n cnt = 0\n seed = None\n seeds = []\n values = []\n for z, y, x in zip(*np.where(mask==3)):\n \n # Only proceed if this voxel is \"free\"\n if mask[z,y,x] == 3:\n \n # Set to 0 initially\n mask[z,y,x] = 0 \n \n # Get value\n val = data[z,y,x]\n \n # Get maximum of neighbours\n patch = data[z-1:z+2, y-1:y+2, x-1:x+2].copy()\n patch[1,1,1] = 0\n themax = patch.max()\n \n # # Criterium 2: must be local max\n # if themax > val:\n # continue\n # # Also ensure at least one neighbour to be *smaller*\n # if (val > patch).sum() == 0:\n # continue\n \n # Criterium 3: one neighbour must be above th\n if themax <= th[0]:\n continue\n \n # Criterium 1B: voxel must be below upper seed th, if given\n if len(th) ==2:\n if val > th[1]:\n if verbose:\n print('Seed removed by higher th: ',(z,y,x),'ctvalue=', val)\n continue\n \n # # Criterium 4: seed must be at least 5 voxels away from other seeds\n # if not seed is None:\n # newseed = np.asarray([z,y,x])\n # v = seeds - newseed\n # d = (v[:,0]**2 + v[:,1]**2 + v[:,2]**2)**0.5 # np.linalg.norm(v) # magnitude\n # if d.min() < 5:\n # cnt+=1\n # continue\n seed = np.asarray([z,y,x])\n seeds.append(seed)\n \n # Set, and suppress stent points at direct neighbours\n #mask[z-1:z+2, y-1:y+2, x-1:x+2] = 1 # do not suppress neighbours to have more points for centerline\n mask[z,y,x] = 2\n values.append(data[z,y,x])\n \n print()\n # print('Seed ctvalues: {}'.format(sorted(values)))\n print('-------')\n # print('Seeds removed by criterium 4: {}'.format(cnt))\n \n return mask", "def Step1(self):\n import random\n print('get mask for seedpoints NELLIX is used')\n # Check if we can go\n if self._vol is None or self._params is None:\n raise ValueError('Data or params not yet given.')\n \n t0 = time.time()\n \n # Detect points\n th = self._params.seed_threshold\n pp = get_stent_likely_positions(self._vol, th) # call below\n \n # Create nodes object from found points\n nodes = stentgraph.StentGraph()\n for p in pp:\n p_as_tuple = tuple(p.flat) # todo: perhaps seed detector should just yield list of tuples.\n nodes.add_node(p_as_tuple)\n \n t1 = time.time()\n if self._verbose:\n print()\n print('Found %i seed points, which took %1.2f s.' 
% (len(nodes), t1-t0))\n \n # Store the nodes\n self._nodes1 = nodes\n \n # Draw?\n if self._draw:\n self.Draw(1)\n \n return nodes", "def get_furthest_offgrid_pin(self, pin, insufficient_list):\n \n # Find the coordinate with the most overlap\n best_coord = None\n best_dist = math.inf\n for coord in insufficient_list:\n min_dist = grid_utils.distance_set(coord, self.blocked_grids)\n if min_dist<best_dist:\n best_dist=min_dist\n best_coord=coord\n \n return set([best_coord])", "def get_stent_likely_positions(data, th):\n \n # Get mask\n mask = get_mask_with_stent_likely_positions(data, th)\n \n # Convert mask to points\n indices = np.where(mask==2) # Tuple of 1D arrays\n pp = PointSet( np.column_stack(reversed(indices)), dtype=np.float32)\n \n # Correct for anisotropy and offset\n if hasattr(data, 'sampling'):\n pp *= PointSet( list(reversed(data.sampling)) ) \n if hasattr(data, 'origin'):\n pp += PointSet( list(reversed(data.origin)) ) \n \n return pp", "def find_basin(self, s):\n \n assert s.size==self.n\n atMin = False\n thisState = s.astype(np.int8)\n\n while not atMin: \n dE = self.neighbor_dE(thisState)\n if np.any( dE<0 ):\n ix = dE.argmin()\n thisState[ix] *= -1\n else:\n atMin = True\n return thisState", "def _getPosLock( self, bSeed ):\n\n\t\treturn ( bSeed & 0xFF )", "def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn", "def get_seed_points(img,seed_values):\n\n m,n = img.shape\n coordinates = [(i,j) for i,j in it.product(range(m),range(n)) if img[i,j] in seed_values]\n\n return coordinates", "def test_find_best_W_mers_2(self):\n self.options.min_num_sites = self.options.max_num_sites = num_to_find = 2\n \n # load data and create STEME object\n fasta_file = os.path.normpath(get_fasta_file('T00759-small.fa'))\n \n #\n # Load sequences and build index\n #\n algorithm = stempy.Algorithm(self.options)\n algorithm._initialise(fasta_file)\n data = algorithm.input_sequences.data\n\n for seed in (\n 'ATGCAGAAAAATTAAG',\n 'TTTAAAATACTTTAAA',\n ):\n # create and seed a model\n W = len(seed)\n model = algorithm.create_model_of_input(W)\n model.bs.seed(seed, True)\n model.set_lambda_for_sites(data.num_sequences)\n \n # look for best W-mers under model\n best_w_mer_finder = stempy.create_best_w_mer_finder(data, model, num_to_find)\n best_w_mer_finder()\n avg_Z = 0.\n for _eval in best_w_mer_finder.best_w_mers:\n logging.info(\n 'Seed: %s; Site: %s; p(binding): %.2e; p(not binding): %.2e',\n seed, data.get_W_mer(W, _eval.global_pos), _eval.Z, 1.-_eval.Z\n )\n avg_Z += _eval.Z\n logging.info('Seed: %s; Average Z: %.6f', seed, avg_Z / len(best_w_mer_finder.best_w_mers))\n \n #\n # Check we found the seed\n #\n for _eval in best_w_mer_finder.best_w_mers:\n if data.get_W_mer(W, _eval.global_pos) == seed:\n break\n else:\n raise RuntimeError('Could not find seed in best W-mers')\n \n #\n # Log the product of p-values\n #\n best_w_mer_finder.update_model(num_to_find, use_pseudo_counts=False)\n logging.info('Seed: %s; log PoP: %.6f', seed, algorithm.significance.log_product_p_values(model))", "def runmaxmin(self):\n import random\n random.seed(self.seed)\n mindist_ptolandmarkset = np.full(self.pointcloud.size, np.inf)\n self.subsetindices = []\n for i in xrange(self.subsetsize):\n if i == 0:\n selected_index = random.randint(0, self.pointcloud.size - 1)\n # update min for all the rest indices\n # update min for this index to 0.\n for z in xrange(self.pointcloud.size):\n # if z == selected_index:\n # 
mindist_ptolandmarkset[z] = 0.0\n # else:\n mindist_ptolandmarkset[z] = self.pointcloud.distmat[selected_index][z]\n else:\n selected_index = np.argmax(mindist_ptolandmarkset)\n # update minimum distance for all points\n for z in xrange(self.pointcloud.size):\n mindist_ptolandmarkset[z] = min(mindist_ptolandmarkset[z],\n self.pointcloud.distmat[selected_index][z])\n\n self.subsetindices.append(selected_index)\n\n self.subsetpointcloud = pc.PointCloud(self.pointcloud.points[self.subsetindices])", "def get_nearest_offgrid_pin(self, pin, insufficient_list):\n # Find the coordinate with the most overlap\n best_coord = None\n best_dist = math.inf\n for coord in insufficient_list:\n track_pin = self.convert_track_to_pin(coord)\n min_dist = pin.distance(track_pin)\n if min_dist<best_dist:\n best_dist=min_dist\n best_coord=coord\n \n return set([best_coord])", "def FindPredeccesor(self, id):\r\n node = self.nodeInfo\r\n while True:\r\n succNode = self.RemoteGetSuccessor(node.Address)\r\n if self.IsInRange(id, node.HashValue, False,succNode.HashValue, True) == False:\r\n node = self.RemoteClosestPrecedingFinger(node.Address, id)\r\n else:\r\n break\r\n return node", "def find_sandwich_top_below(blk):\n if blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return blk\n # Always follow the main branch of a flow: the last connection.\n _blk = blk.connections[len(blk.connections) - 1]\n while _blk is not None:\n if _blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return _blk\n _blk = _blk.connections[len(_blk.connections) - 1]\n return None", "def ClosestPrecedingFinger(self, id):\r\n for i in range(M_BITS, 0, -1):\r\n if self.IsInRange(self.fingerTable[i].Node.HashValue, self.nodeInfo.HashValue, False, id, False):\r\n return self.fingerTable[i].Node\r\n return self.nodeInfo", "def searchDeadEnd(self):\n boundaries = []\n if not self.red:\n i = self.midWidth - 1\n else:\n i = self.midWidth + 1\n boudaries = [(i, j) for j in range(self.height)]\n validPositions = []\n for i in boudaries:\n if not (i[0], i[1]) in self.walls:\n validPositions.append(i)\n\n dangerPos = []\n\n toExpand = self.scanmap.twoEntryPoints()\n for (x,y) in toExpand:\n adjacent = self.scanmap.adjacentValidPoints(x, y)\n if not (x,y) in dangerPos:\n for (u, w) in adjacent:\n visited = []\n visited.append((x, y))\n safe = False\n danger = False\n DFS = util.Stack()\n DFS.push((u,w))\n while not safe and not danger:\n (i,j) = DFS.pop()\n visited.append((i,j))\n adjacents = self.scanmap.adjacentValidPoints(i,j)\n for position in adjacents:\n if not position in visited:\n DFS.push(position)\n if DFS.isEmpty():\n danger = True\n dangerPos = list(set(dangerPos) | set(visited))\n\n if (i,j) in validPositions:\n safe = True\n oneEntry = self.scanmap.oneEntryPoints()\n dangerPos = list(set(oneEntry).union(set(dangerPos)))\n dangerPos.sort()\n return dangerPos", "def find_sandwich_top(blk):\n # Always follow the main branch of a flow: the first connection.\n _blk = blk.connections[0]\n while _blk is not None:\n if _blk.name in COLLAPSIBLE:\n return None\n if _blk.name in ['repeat', 'if', 'ifelse', 'forever', 'while']:\n if blk != _blk.connections[len(_blk.connections) - 1]:\n return None\n if _blk.name in ['sandwichtop', 'sandwichtop_no_label',\n 'sandwichtop_no_arm', 'sandwichtop_no_arm_no_label']:\n return _blk\n blk = _blk\n _blk = _blk.connections[0]\n return None", "def maximize_seed(self, seed, direction):\n while 
True:\n comp = self.complement(seed)\n x = self.solver.new_var() + 1\n if direction:\n # search for a solution w/ all of the current seed plus at\n # least one from the current complement.\n self.solver.add_clause([-x] + [i + 1 for i in comp]) # temporary clause\n # activate the temporary clause and all seed clauses\n havenew = self.solver.solve([x] + [i + 1 for i in seed])\n else:\n # search for a solution w/ none of current complement and at\n # least one from the current seed removed.\n self.solver.add_clause([-x] + [-(i + 1) for i in seed]) # temporary clause\n # activate the temporary clause and deactivate complement clauses\n havenew = self.solver.solve([x] + [-(i + 1) for i in comp])\n self.solver.add_clause([-x]) # remove the temporary clause\n\n if havenew:\n seed = self.get_seed()\n else:\n return seed", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def closestScaredGhost(pos, scaredGhosts, walls):\n fringe = [(pos[0], pos[1], 0)]\n expanded = set()\n while fringe:\n pos_x, pos_y, dist = fringe.pop(0)\n if (pos_x, pos_y) in expanded:\n continue\n expanded.add((pos_x, pos_y))\n # if we find a scared ghost at this location then exit\n for ghostPosition in scaredGhosts: # Check if collision\n if manhattanDistance( ghostPosition, (pos_x, pos_y) ) <= COLLISION_TOLERANCE:\n return dist\n # otherwise spread out from the location to its neighbours\n nbrs = Actions.getLegalNeighbors((pos_x, pos_y), walls)\n for nbr_x, nbr_y in nbrs:\n fringe.append((nbr_x, nbr_y, dist+1))\n # no scared ghost found\n return None", "def getStartingDistrict(self):\n district = None\n\n if len(self.activeDistrictMap.keys()) == 0:\n self.notify.info('no shards')\n return None\n\n if base.fillShardsToIdealPop:\n # Choose highest-population shard that is not yet\n # a 'high-population' shard\n lowPop, midPop, highPop = base.getShardPopLimits()\n self.notify.debug('low: %s mid: %s high: %s' %\n (lowPop, midPop, highPop))\n for s in self.activeDistrictMap.values():\n if s.available and s.avatarCount < lowPop:\n self.notify.debug('%s: pop %s' %\n (s.name, s.avatarCount))\n if district is None:\n district = s\n else:\n # if multiple shards have the same population,\n # sort them by name so that all clients will\n # choose the same one\n if s.avatarCount > district.avatarCount or (\n (s.avatarCount == district.avatarCount and\n s.name > district.name)\n ):\n district = s\n\n # if all of the shards are over the cutoff population, pick\n # the lowest-population shard\n if district is None:\n self.notify.debug(\n 'all shards over cutoff, picking lowest-population shard')\n for s in self.activeDistrictMap.values():\n if s.available:\n self.notify.debug('%s: pop %s' %\n (s.name, s.avatarCount))\n if (district is None or\n (s.avatarCount < district.avatarCount)):\n district = s\n\n if district is not None:\n self.notify.debug('chose %s: pop %s' % (district.name, district.avatarCount))\n return district", "def FindClosestInsertedPoint(self, ):\n ...", "def cluster(M, point, eps): # zwraca punkty dla ktorych dystans z punktu point jest mniejszy od eps\n seeds = []\n for i in range(0, M.shape[0]):\n if eps_neighborhood(M, point, i, eps):\n seeds.append(i)\n return seeds", "def get_best_offgrid_pin(self, pin, insufficient_list):\n # Find the coordinate with the most 
overlap\n best_coord = None\n best_overlap = -math.inf\n for coord in insufficient_list:\n full_pin = self.convert_track_to_pin(coord)\n # Compute the overlap with that rectangle\n overlap_rect=pin.compute_overlap(full_pin)\n # Determine the min x or y overlap\n min_overlap = min(overlap_rect)\n if min_overlap>best_overlap:\n best_overlap=min_overlap\n best_coord=coord\n \n return set([best_coord])", "def naive_consensus_search(Ts, m):\n k = len(Ts)\n\n bsf_radius = np.inf\n bsf_Ts_idx = 0\n bsf_subseq_idx = 0\n\n for j in range(k):\n radii = np.zeros(len(Ts[j]) - m + 1)\n for i in range(k):\n if i != j:\n mp = naive.stump(Ts[j], m, Ts[i])\n radii = np.maximum(radii, mp[:, 0])\n min_radius_idx = np.argmin(radii)\n min_radius = radii[min_radius_idx]\n if min_radius < bsf_radius:\n bsf_radius = min_radius\n bsf_Ts_idx = j\n bsf_subseq_idx = min_radius_idx\n\n return bsf_radius, bsf_Ts_idx, bsf_subseq_idx", "def look_ahead(self, point):\n directions = [N(Point.make(point)), S(Point.make(point)), E(Point.make(point)), W(Point.make(point))]\n for point in directions:\n if not point in self.nodes:\n return True\n return False", "def targetpoint(self, initpoint):\n while True:\n col = int(random.uniform(0, COLS))\n row = int(random.uniform(0, ROWS))\n if (row, col) != initpoint:\n break\n return (row, col)", "def __findFarestPoint__( self, outPoint ):\n end = outPoint;\n endInside = self.inside( end );\n if endInside: return outPoint;\n start = self.center;\n startInside = self.inside( start );\n \n while( True ):\n if ( utility.euclideanDistSqr( start, end ) <= 4 ):\n return start;\n mid = utility.devide( utility.add( start, end ), 2);\n if self.inside( mid ):\n start = mid;\n else:\n end = mid;", "def _get_nearest_neighbor(self, sample):\n d_min=float('inf') #minimum distance\n node_neighbor=self.start\n\n for iter in self.start:\n d=0 #distance between sample and each node in the node tree\n for j in range(sample.size):\n d+=(iter.state[j]-sample[j])**2\n if(d<d_min):\n d_min=d\n node_neighbor=iter\n\n return node_neighbor", "def has_uniform_seed_margin(self, seed_margin=20.0):\n margin = np.ceil(np.reciprocal(np.array(CONFIG.volume.resolution),\n dtype=np.float64) * seed_margin).astype(np.int64)\n\n mask_target = self.label_mask\n # If data is unlabeled, can not test so always succeed.\n if mask_target is None:\n return True\n # Seed location in the mask accounting for offset of label from image.\n ctr = self.seed - (np.asarray(self.image.shape) - np.asarray(mask_target.shape)) // 2\n seed_fov = (ctr - margin, ctr + margin + 1)\n seed_region = mask_target[seed_fov[0][0]:seed_fov[1][0],\n seed_fov[0][1]:seed_fov[1][1],\n seed_fov[0][2]:seed_fov[1][2]]\n return np.all(seed_region)", "def blocks_ahead_of_pacman(self, dx, dy):\n\n # Here's where we want to move to\n x = self.rect.x + dx\n y = self.rect.y + dy\n\n # Find integer block pos, using floor (so 4.7 becomes 4)\n # ix, iy = int(x // BLOCK_SIZE), int(y // BLOCK_SIZE)\n # # Remainder let's us check adjacent blocks\n # rx, ry = x % BLOCK_SIZE, y % BLOCK_SIZE\n\n # blocks = [world[iy][ix]]\n # if rx: blocks.append(world[iy][ix + 1])\n # if ry: blocks.append(world[iy + 1][ix])\n # if rx and ry: blocks.append(world[iy + 1][ix + 1])\n\n #return blocks\n return None", "def initLocalBestChoice(self):\n random.seed()\n return", "def find_list_for_old_point(self, point):\n target = hash_graphics_point(point)\n for i, markers in enumerate(self._points):\n hashes = [hash_graphics_point(x) for x in markers]\n if target in hashes:\n return i\n\n return 
None", "def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode", "def find(self, mu):\n for k, muk in enumerate(self.mu_db):\n if self.norm(muk - mu) == 0.0:\n ind = k+self.offset\n return ind, self.which[k]\n return None, None", "def FindClosestPoint(self, ):\n ...", "def _positive_ancestor(self, refindex, seedindex):\n \n key = (len(self.row_names)*refindex) + seedindex\n if key in self.cache:\n return self.cache[key]\n \n refdata = self.data[refindex]\n result = self._find_positive_ancestor(refdata, seedindex)[0]\n self.cache[key] = result\n return result", "def find_closest_edge_slow(self, datum):\n return self._find_closest_shape_in_list(self.edges(), datum)", "def check_seed():\n np.random.seed(1000)\n standard = [\n {0: -3.0, 1: -5.0, 'index': 0},\n {0: -6.0, 1: -8.0, 'index': 1},\n {0: 5.0, 1: -1.0, 'index': 2},\n {0: 1.0, 1: -7.0, 'index': 3},\n {0: -2.0, 1: -3.0, 'index': 4},\n {0: 7.0, 1: 3.0, 'index': 5},\n {0: -4.0, 1: -2.0, 'index': 6},\n {0: 2.0, 1: 6.0, 'index': 7}\n ]\n\n this_machine = create_points(8)\n\n flag = True\n for i in range(8) :\n flag &= this_machine[i][0] == standard[i][0] \n flag &= this_machine[i][1] == standard[i][1] \n flag &= this_machine[i][\"index\"] == i\n \n if not flag :\n print(\"\"\"\n The Python installation on this machine is odd: it appears to\n use a non-standard random number generator -- run \n this script on the machines in the Otter lab instead.\n If that fails too, send an email to ag0015@surrey.ac.uk.\n \"\"\")\n print (\"You got these test points:\", this_machine)\n print (\"You should have got:\", standard)\n exit(-1)\n else :\n print (\"Check passed\")", "def neighbor(self,s):\n jump=20\n while True:\n s+=random.randint(-1*jump,jump)\n if s < pow(10,5) and s > pow(10,-5):return s", "def closest_point(point, points):\n return points[cdist([point], points).argmin()]", "def df_search(grid, level):\n states_we_have_seen_before = Set(grid)\n\n def recur(inner_grid, itter, level):\n counter = 0\n next_states = Set()\n\n for gg in legal_moves(inner_grid):\n if gg not in states_we_have_seen_before:\n states_we_have_seen_before.add(gg)\n next_states.add(gg)\n\n for t in next_states:\n if match_level(t, level):\n return (size * size * size - itter, t)\n\n if itter > 0:\n for t in next_states:\n r = recur(t, itter - 1, level)\n if r:\n return r\n return None\n\n return recur(grid, size * size * size, level)", "def find_point(self, point: Point):\n for internal_point in self.points:\n if internal_point == point:\n return internal_point\n return None", "def find_closest_face_slow(self, datum):\n return self._find_closest_shape_in_list(self.faces(), datum)", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def find(self, mu):\n for k, muk in enumerate(self.mu_db):\n if self.norm(muk - mu) == 0.0:\n return 
k+self.offset\n return None", "def sgr_segmentation(img,seed_values,error_threshold=5):\n\n img_copy = np.copy(img)\n\n m,n = img_copy.shape\n\n seed_points = get_seed_points(img_copy,seed_values)\n\n vis = [[False for j in range(n)] for i in range(m)]\n\n for i,j in seed_points:\n if vis[i][j] == False:\n dfs_sgr_segmentation(i,j,img_copy,vis,error_threshold,m,n)\n\n for i,j in it.product(range(m),range(n)):\n if img_copy[i,j] not in seed_values:\n img_copy[i,j] = 0\n\n return img_copy", "def dijkstra(self, seed=0):\n import heapq\n if hasattr(seed, '__iter__') == False:\n seed = [seed]\n try:\n if (self.weights < 0).any():\n raise ValueError('some weights are non-positive')\n except:\n raise ValueError('undefined weights')\n dist, active = np.inf * np.ones(self.V), np.ones(self.V)\n idx, neighb, weight = self.compact_neighb()\n dist[seed] = 0\n dg = list(zip(np.zeros_like(seed), seed))\n heapq.heapify(dg)\n for j in range(self.V):\n end = False\n while True:\n if len(dg) == 0:\n end = True\n break\n node = heapq.heappop(dg)\n if active[node[1]]:\n break\n if end:\n break\n dwin, win = node\n active[win] = False\n # the folllowing loop might be vectorized\n l = neighb[idx[win]: idx[win + 1]]\n newdist = dwin + weight[idx[win]: idx[win + 1]]\n who = newdist < dist[l]\n for z in zip(newdist[who], l[who]):\n heapq.heappush(dg, z)\n dist[l[who]] = newdist[who]\n return dist", "def known_mines(self):\n \n if len(self.cells) == self.count:\n return self.cells", "def problem5(self, s):\n points = 0\n\n points = self.neighbor( 10, 10, s.nearest_neighbor)*3\n points += self.neighbor(100, 10, s.nearest_neighbor)*3\n points += self.neighbor( 10, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n points += self.neighbor(100, 100, s.nearest_neighbor)*3\n\n _testDriver.get_code(s.nearest_neighbor)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n \n return points", "def _find_positive_ancestor(self, refdata, seedindex): \n \n seedval = refdata[seedindex]\n if seedval > self.row_priors[seedindex]: \n return seedindex, -seedval/self.row_priors[seedindex]\n \n # find parents of seed\n parents = self.parents\n seedparents = parents[seedindex]\n parents_len = len(seedparents)\n if parents_len == 0:\n return None, 0\n elif parents_len == 1:\n return self._find_positive_ancestor(refdata, seedparents[0])\n elif parents_len == 2:\n # handle special case when there are only two items\n # instead of doing a general query and sort, pick best of two \n r0 = self._find_positive_ancestor(refdata, seedparents[0])\n r1 = self._find_positive_ancestor(refdata, seedparents[1])\n if r1[1] < r0[1]:\n return r1 \n return r0 \n \n # study multiple paths toward root, return most enriched\n result = [self._find_positive_ancestor(refdata, _) for _ in seedparents] \n return min(result, key=itemgetter(1))", "def choose_random_site(self):\n return [self.rng.integers(low=0, high=self.num_spins - 1)]", "def get_near(self,map):\n near_cells = []\n for i in range(self.x-1, self.x+2):\n for j in range(self.y-1, self.y+2):\n if(i>=0 and i<map.size and j>=0 and j<map.size): near_cells.append(map.search(i,j))\n return near_cells", "def _choose_position(self):\n \n \n # function used to find R given M\n def _root_function(r, func, uval, m_tot):\n \n return uval * m_tot - func(r)\n \n # optimization switches\n if self.optimize:\n mass_func = self._interpolate_cumulative_mass\n \n umin = self._mass_umin\n umax = self._mass_umax\n else:\n # use exact profile\n mass_func 
= self.DF.dprof.cumulative_mass\n \n umin = mass_func(self.DF.dprof.small_r) / self.DF.dprof.M_sys\n umax = mass_func(self.DF.dprof.large_r) / self.DF.dprof.M_sys\n \n \n failed = True\n # root finder may fail occasionally if r is too close to zero\n # keep drawing random number until it works.\n # alternate soln would be to draw from M(small_r)/M_tot to\n # M(large_r) / M_tot instead of 0 to 1... \n #while failed:\n i = 0\n while (failed and i < 100):\n \n try:\n u = np.random.rand()*(umax - umin) + umin\n\n r = opt.brentq(_root_function, self.DF.dprof.small_r, self.DF.dprof.large_r, \n args=(mass_func ,u, self.DF.dprof.M_sys,))\n failed = False\n \n except:\n failed = True\n _my_print('Root finder for position failing for the %004i time. Re-rolling.'%(i))\n \n \n i = i + 1\n # except: \n # failed = True\n\n \n \n return r", "def position_from_seed(seed):\n random.seed(seed)\n ascii_character_sum = sum(bytearray(seed, \"utf8\")) # Sums the ASCII values of every character\n offset = random.randint(1, 100)\n start_position = (math.log(ascii_character_sum / 100) + offset, math.log(ascii_character_sum / 100) + offset)\n end_positon = (start_position[0] + 100, start_position[1] + 100)\n square_position = (start_position, end_positon)\n print(square_position)\n \n return square_position", "def recover_seed(ct: bytes, known: bytes):\n for seed in range(2 ** 16):\n mt = MersenneTwisterStreamCipher(seed)\n cpt = mt.decrypt(ct)\n if cpt is not None and known in cpt:\n return seed", "def test_dbscan_min(self):\n pfs_file = os.path.join(\"tests\", \"data\", \"positionfixes.csv\")\n pfs = ti.read_positionfixes_csv(pfs_file, sep=\";\", tz=\"utc\", index_col=\"id\", crs=\"epsg:4326\")\n _, sp = pfs.as_positionfixes.generate_staypoints(\n method=\"sliding\", gap_threshold=1e6, dist_threshold=0, time_threshold=0\n )\n _, locs_user = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=1e-18, num_samples=1, agg_level=\"user\"\n )\n _, locs_data = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=1e-18, num_samples=1, agg_level=\"dataset\"\n )\n # With small hyperparameters, clustering should not reduce the number\n assert len(locs_user) == len(sp)\n assert len(locs_data) == len(sp)", "def find_indirect_gap(self,rpts=5):\n # First find the miniumu of the upper band.\n # Start with a random point in the BZ.\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun1= lambda x: self.Ham_eigvals(x[0],x[1])[self.NL]\n # Optimize initial guess.\n x1up=optimize.minimize(fun1,x0up).x\n valup=fun1(x1up)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n # Also always check special points in the BZ\n x0up=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n x0up=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n \n # Repeat the same for the lower band\n x0dn=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun2= lambda x: -self.Ham_eigvals(x[0],x[1])[self.NL-1]\n # Optimize initial guess.\n 
x1dn=optimize.minimize(fun2,x0dn).x\n valdn=fun2(x1dn)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0dn=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n # Also always check special points in the BZ\n x0dn=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n x0dn=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1dn=optimize.minimize(fun2,x0dn).x\n if fun2(xnew1dn)<valdn:\n x1dn=xnew1dn\n valdn=fun2(x1dn)\n \n return valup+valdn,x1up,x1dn", "def search(self):\n open_set = set()\n closed_set = set()\n open_set.add(self.start_node)\n\n # loop through all nodes until open set is empty to build neighbor map\n while open_set:\n current_node = open_set.pop()\n closed_set.add(current_node)\n for removed_cells, score, next_status in current_node.find_next_moves():\n open_status_set = [i.status for i in open_set]\n closed_status_set = [i.status for i in closed_set]\n if next_status in open_status_set:\n index = open_status_set.index(next_status)\n node = list(open_set)[index]\n elif next_status in closed_status_set:\n index = closed_status_set.index(next_status)\n node = list(closed_set)[index]\n else:\n node = PopstarsNode(next_status)\n open_set.add(node)\n node.parents.append(current_node)\n current_node.children[node].append(\n (score, removed_cells, True))\n current_node.update_parents()\n max_score = []\n for i in self.start_node.children:\n max_score += self.start_node.children[i]\n return max(max_score)[0]", "def find_closest_addr(self, *args):\n return _ida_hexrays.citem_t_find_closest_addr(self, *args)", "def get_startpop(self, seed=\"\"):\n # Return a blank string list if no seed given\n if seed == \"\":\n self.logprint(\"Seed: None.\")\n self.startpop = [\"\"]\n return\n # Otherwise, get list of seed populations\n pops = self.get_seed_all(seed)\n self.startpop = pops", "def face_nearest_block(self):\n try:\n block = self.swarmie.get_nearest_block_location(\n use_targets_buffer=True\n )\n except tf.Exception:\n # The caller should be about to exit with a normal exit code\n # after this call anyway, so the pickup behavior is launched.\n return\n\n if block is not None:\n angle = self.get_angle_to_face_point(block)\n self.swarmie.turn(angle, ignore=Obstacle.IS_VISION, throw=False)\n\n return", "def unoccupied_cooling_setpoint(self) -> int | None:\n return self.cluster.get(\"unoccupied_cooling_setpoint\")", "def checked_positions():\n for base_position in chain([me.shipyard], me.get_dropoffs()):\n x_shipyard = base_position.position.x\n y_shipyard = base_position.position.y\n for x in range(-search_range, search_range):\n for y in range(-search_range, search_range):\n yield hlt.Position(\n x=x_shipyard + x,\n y=y_shipyard + y)", "def __random_pickup(self, guess):\n already_clustered = guess.sum(axis=0)\n while True:\n p1 = random.randint(0, guess.shape[1] - 1)\n p2 = random.randint(0, guess.shape[2] - 1)\n if not already_clustered[p1, p2]:\n return (p1, p2)", "def closest_breakpoint(self, chromosome, position):\n breaks = self.breaks[(chromosome, self.selected_k[chromosome])]\n i = bisect.bisect_right(breaks, position)\n if i == 0:\n return breaks[0]\n elif i == len(breaks):\n return breaks[i - 1]\n elif abs(position - breaks[i - 1]) < abs(position - breaks[i]):\n return breaks[i - 1]\n else:\n return breaks[i]", "def 
find_direct_gap(self,rpts=5):\n # Start with a random point in the BZ.\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n # Define functions to minimize\n fun1= lambda x: self.Ham_eigvals(x[0],x[1])[self.NL]-self.Ham_eigvals(x[0],x[1])[self.NL-1]\n # Optimize initial guess.\n x1up=optimize.minimize(fun1,x0up).x\n valup=fun1(x1up)\n # Reiterate to check for local minima.\n for ix in range(rpts):\n for iy in range(rpts):\n x0up=[self.kS.kx0+random()*(self.kS.kxmax-self.kS.kx0),self.kS.ky0+random()*(self.kS.kymax-self.kS.ky0)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n # Also always check special points in the BZ\n x0up=[0.,(4.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n x0up=[2.*pi/3.,(2.*pi/3.)/np.sqrt(3.)]\n xnew1up=optimize.minimize(fun1,x0up).x\n if fun1(xnew1up)<valup:\n x1up=xnew1up\n valup=fun1(x1up)\n \n return valup,x1up", "def test_get_closest_stations(self):\n\t\tpoint = \"POINT(40.71911552 -74.00666661)\"\n\t\tstations = set(server.get_closest_stations(point))\n\t\t# find the closest stations, make them a set of objects see if sets intersect completely", "def saddle_point(self):\n\n maxmin_value, maxmin_strategy_set = self.maxmin(0)\n minmax_value, minmax_strategy_set = self.minmax(1)\n\n if maxmin_value == minmax_value:\n return maxmin_strategy_set.intersection(minmax_strategy_set)\n return None", "def get_random_neighbor(self):\r\n r_mask = np.random.randint(0, 2, 7)\r\n r_exp = np.random.randint(0, 2, 7)\r\n changes = r_mask*self.attr_factors*np.power(-1.0, r_exp)\r\n Sp = self.S + changes\r\n basal_C = Sp[4]\r\n Sp = np.maximum(Sp, 0.0)\r\n Sp[4] = basal_C\r\n return Sp", "def find_background_point(mask):\n zerocoords = np.where(mask == 0)\n i = np.random.randint(0,len(zerocoords[0]))\n coord = zerocoords[0][i]\n for n in range(1, len(zerocoords)):\n coord = np.append(coord, zerocoords[n][i])\n return tuple(coord)", "def sample_valid_seeds(mask: Tensor, num_sampled_seed: int = 1024) -> Tensor: # noqa: E501\n device = mask.device\n batch_size = mask.shape[0]\n sample_inds = mask.new_zeros((batch_size, num_sampled_seed),\n dtype=torch.int64)\n for bidx in range(batch_size):\n # return index of non zero elements\n valid_inds = torch.nonzero(mask[bidx, :]).squeeze(-1)\n if len(valid_inds) < num_sampled_seed:\n # compute set t1 - t2\n t1 = torch.arange(num_sampled_seed, device=device)\n t2 = valid_inds % num_sampled_seed\n combined = torch.cat((t1, t2))\n uniques, counts = combined.unique(return_counts=True)\n difference = uniques[counts == 1]\n\n rand_inds = torch.randperm(\n len(difference),\n device=device)[:num_sampled_seed - len(valid_inds)]\n cur_sample_inds = difference[rand_inds]\n cur_sample_inds = torch.cat((valid_inds, cur_sample_inds))\n else:\n rand_inds = torch.randperm(\n len(valid_inds), device=device)[:num_sampled_seed]\n cur_sample_inds = valid_inds[rand_inds]\n sample_inds[bidx, :] = cur_sample_inds\n return sample_inds", "def select_bin_tour_min(population, tsp):\n parents = []\n while len(parents) < 2:\n best = None \n for i in range(2):\n ind = population[random.randint(0, len(population)-1)]\n if (best == None) or (fitness_tsp(ind, tsp) > fitness_tsp(best, tsp)):\n best = ind\n parents.append(best)\n return parents", "def get_st(ts, bs_map):\n board = ts.observation['board']\n return bs_map[pl_box_coords(board)]", "def find_random_spot(self):\n random_line_index = 
[]\n random_col = 0\n random_col_index = 100\n\n # -- loops through maze and stops when it finds an open spot\n while random_col != \" \":\n random_line_index = randrange(len(self._grid))\n random_col_index = randrange(len(self._grid[random_line_index]))\n random_col = self._grid[random_line_index][random_col_index]\n # -- returns the coordinates of the open spot\n coordinates = (random_line_index, random_col_index)\n return coordinates", "def _get_random_pos_on_top(self):\n # z = self._top_position() - self.offset\n z = self.upper_vertex[2] - self.offset\n x_lower, x_upper = self._shrink_range_by_padding(self._x_range())\n y_lower, y_upper = self._shrink_range_by_padding(self._y_range())\n x = random.uniform(x_lower, x_upper)\n y = random.uniform(y_lower, y_upper)\n return x, y, z", "def defenderGoal(self, point, myPos):\n (x, y) = point\n temp = self.scanmap.adjacentValidPoints(x, y)\n targets = []\n for i in temp:\n (x, y) = i\n targets += self.scanmap.adjacentValidPoints(x, y)\n targets = list(dict.fromkeys(targets))\n targets.remove(point)\n\n minDis = self.getMazeDistance(myPos, targets[0])\n nearestDefender = targets[0]\n for j in targets:\n dis = self.getMazeDistance(myPos, j)\n if dis < minDis:\n minDis = dis\n nearestDefender = j\n return nearestDefender", "def find_startpos(self, searched_object:str):\r\n fak = 1 #< When the figure needs to be pushed to the right -> fak = 1 else fak = 0\r\n # The main figures spwan position beginns at index 14 and ends at size(self.look_up_table) - 9\r\n start_index = 14\r\n y = start_index \r\n end_index = -9\r\n for x in self.look_up_table[start_index : end_index]:\r\n # When the serached object is in the row then get the index of it\r\n if searched_object in x:\r\n x = x.index(searched_object)\r\n break\r\n y += 1\r\n # Pac-Man does not need to push to the right\r\n if searched_object == 'PACMAN':\r\n fak = 0\r\n return x * self.grid_size + fak * self.grid_size // 2, y * self.grid_size", "def clumpfind(\n self,\n levels=None,\n corners=False,\n seeded=False,\n allow_new_peaks=True,\n timer=True\n ):\n\n # ...................................................\n # Check user options\n # ...................................................\n\n if self.linked_data == None:\n print \"Clumpfind assignment requires data.\"\n return\n \n if seeded == True:\n if self.linked_lmax == None:\n print \"Seeded clumpfind assignment requires local maxima.\"\n return\n \n if seeded == False and allow_new_peaks == False:\n print \"Cannot run an unseeded (classic) clumpfind without being able to add seeds.\"\n return\n\n # ...................................................\n # Get data to use\n # ................................................... 
\n\n # Get the data and set the values we will not use to a low\n # number that will be ignored by the algorithm.\n\n data = copy.deepcopy(self.linked_data.data)\n if self.linked_mask != None:\n use = self.linked_mask.data*self.linked_data.valid\n else:\n use = self.linked_data.valid\n min_use = np.min(self.linked_data.data[use])\n max_use = np.max(self.linked_data.data[use])\n low_value = min_use-1.\n data[(use==False)] = low_value\n\n # ...................................................\n # Calculate contour levels\n # ...................................................\n\n if levels == None:\n if self.linked_data.noise != None:\n print \"Defaulting to 2 sigma spacing.\"\n levels = contour_values(\n linspace = True,\n maxval = max_use,\n minval = min_use, \n spacing = 2.0*self.linked_data.noise.scale\n )\n else:\n print \"Need a noise estimate.\"\n return\n\n self.levels = levels\n\n # ...................................................\n # Build the structuring element\n # ...................................................\n\n structure = (Struct(\n \"simple\", \n ndim=self.linked_data.data.ndim, \n corners=corners)).struct\n\n # ...................................................\n # Initialize the output\n # ...................................................\n\n # ... data\n self.data = np.zeros_like(data, dtype=np.int)\n\n # ... local maxima\n if seeded == False:\n print \"Initializing a new set of local maxima\"\n self.linked_lmax = \\\n lmax.Lmax(self.linked_data, self.linked_mask)\n\n # ...................................................\n # Loop over levels (from high to low)\n # ...................................................\n\n nlev = len(levels)\n count = 0\n\n for level in levels: \n\n # ........................\n # Print a counter\n # ........................\n\n perc = count*1./nlev\n sys.stdout.write('\\r') \n sys.stdout.write(\"Clumpfind level %d out of %d\" % (count, nlev))\n sys.stdout.flush()\n count += 1\n\n # ............................\n # Label regions for this level\n # ............................\n\n thresh = (data >= level)\n labels, ncolors = ndimage.label(\n thresh,\n structure=structure)\n \n # ...........................\n # Vectorize the labeled data\n # ...........................\n\n # This gives a big speedup for sparse data.\n\n ind = np.where(thresh)\n val = self.linked_data.data[ind]\n ind_arr = cube.xyztup_to_array(ind, coordaxis=1)\n label_vec = labels[ind]\n\n # Get the assignments for the current seeds\n if self.linked_lmax.num > 0:\n seed_labels = labels[self.linked_lmax.as_tuple()]\n \n # ........................................\n # Loop over discrete regions at this level\n # ........................................\n\n for label in range(1,ncolors+1):\n \n # ........................................\n # Get the indices for this region\n # ........................................\n\n this_color = np.where(label_vec == label)\n this_val = val[this_color]\n this_ind_arr = ind_arr[this_color[0],:]\n this_ind = cube.xyzarr_to_tuple(this_ind_arr,coordaxis=1)\n\n # ........................................\n # Check if we should add a new peak\n # ........................................\n\n # If there are no peaks or if there are no peaks in\n # this region, we want to add a new one --- but only\n # if that's allowed! 
\n\n # A future extension is to add additional criteria\n # that must be met to add a peak (volume, area, etc.)\n\n if self.linked_lmax.num == 0:\n if allow_new_peaks:\n add_a_new_peak = True\n else:\n continue\n elif np.sum(seed_labels == label) == 0:\n if allow_new_peaks:\n add_a_new_peak = True\n else:\n continue\n else:\n add_a_new_peak = False\n \n # ........................................\n # Add a new peak\n # ........................................\n\n if add_a_new_peak:\n\n # Find the location of the maximum value\n maxind = np.argmax(this_val)\n\n # Get the corresponding coordinates\n peak_index = this_ind_arr[maxind,:]\n\n # Add a local maximum\n new_name = self.linked_lmax.add_local_max(peak_index)\n\n # Label these data in the assignment cube\n self.data[this_ind] = new_name\n\n continue\n\n # ........................................\n # Deal with the case of a signle seed\n # ........................................\n\n if np.sum(seed_labels == label) == 1:\n \n maxind = np.where((seed_labels == label))\n\n self.data[this_ind] = self.linked_lmax.name[maxind]\n\n continue\n\n # ........................................\n # Deal with the case of competing seeds\n # ........................................\n\n # Several matching labels\n if np.sum(seed_labels == label) > 1:\n\n # Initialize an assignment vector\n this_assign = np.zeros_like(this_val)\n best_dist = np.zeros_like(this_val)\n\n # Identify the competing seeds\n maxind = np.where((seed_labels == label))\n\n n_max = len(maxind[0])\n\n for i in range(n_max):\n \n this_max_name = self.linked_lmax.name[maxind[0][i]]\n\n this_max_coord = self.linked_lmax.indices[this_max_name-1]\n\n dist_to_this_max = \\\n np.sum((this_ind_arr - this_max_coord)**2,axis=1)\n \n if i == 0:\n # ... all true for the first test\n is_closest = (dist_to_this_max == dist_to_this_max)\n else:\n is_closest = (dist_to_this_max < best_dist)\n\n this_assign[is_closest] = this_max_name\n best_dist[is_closest] = dist_to_this_max[is_closest]\n\n\n self.data[this_ind] = this_assign", "def cheapest_spanning_edge(self, mst, graph):\n min_edge_cost = float('inf')\n vertice_not_in_mst = None\n min_edge = None\n for mst_vertice in mst.vertices:\n for neighbor in graph.vertices[mst_vertice]:\n if neighbor not in mst.vertices:\n edge = frozenset([mst_vertice, neighbor])\n edge_cost = graph.edges[edge]\n if edge_cost < min_edge_cost:\n vertice_not_in_mst = neighbor\n neighbors = graph.vertices[neighbor]\n min_edge_cost = edge_cost\n min_edge = edge\n\n return vertice_not_in_mst, neighbors, min_edge, min_edge_cost", "def find_offsets(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def _add_peaks_from_seed_points3d(self, seed_points_3d, style='cone', steepness=1):\n if style == 'cone':\n z_func = Point2D.distance2d\n elif style == 'pyramid':\n z_func = Point2D.max_orthogonal_distance\n else:\n raise Exception('Unknown Style:' + style)\n\n # Go through all points. Pick the closest seed to the point, and calculate its z value and insert it\n for y in range(self._lower_left.y, self._upper_right.y + 1):\n for x in range(self._lower_left.x, self._upper_right.x + 1):\n grid_point = Point2D(x, y)\n # consider using scipy.spatial.distance.cdist if moving to numpy. 
Lots of metric options to use!\n closest_seed = min(seed_points_3d, key=lambda seed: grid_point.distance2d(seed))\n delta_z = round(steepness * (closest_seed.z - z_func(grid_point, closest_seed)))\n if delta_z < 0:\n delta_z = 0\n # get existing value if any for this point (in case adding a layer)\n z = self._tm.get_z(grid_point,\n default=0)\n self._tm.set_z(Point2D(x, y), z + delta_z)\n\n return self._tm", "def dfs(sx, sy, tx, ty):\n seen = {(sx, sy)}\n stack = [(sx, sy)]\n while stack: \n x, y = stack.pop()\n if abs(x - sx) + abs(y - sy) > 200 or (x, y) == (tx, ty): return True \n for xx, yy in (x-1, y), (x, y-1), (x, y+1), (x+1, y): \n if 0 <= xx < 1e6 and 0 <= yy < 1e6 and (xx, yy) not in blocked and (xx, yy) not in seen: \n seen.add((xx, yy))\n stack.append((xx, yy))\n return False", "def decomposeSeedPos( self, bSeed ):\n\n\t\ttry:\n\t\t\tbSeed = long( bSeed )\n\t\t\t\n\t\t\t# This is a copy of the functions in product-key/ck-common.c\n\t\t\t# They are rewritten here to save on shelling out with each\n\t\t\t# rda-backend that checks in.\n\t\t\tbSerial = self._getSerial( bSeed )\n\t\t\t#sVersion = self._getVersion( bSeed )\n\t\t\t#bNumcam = self._getNumcam( bSeed )\n\t\t\t#Jake Add something here\n\t\t\tbPosLock = self._getPosLock(bSeed)\n\t\t\tsMac = self._getMac( bSeed )\n\n\t\t\treturn ( bSerial, bPosLock, sMac )\n\n\t\texcept Exception, e:\n\t\t\terrMsg( 'error while decomposing key' )\n\t\t\traise", "def find_list_for_new_point(self, point):\n hash_code = get_parent_hash(point)\n for i, _ in enumerate(self._points):\n for point_move in self._points[i]:\n if hash_graphics_point(point_move) == hash_code:\n return i\n\n return None", "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition()\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState)\n\n \"*** YOUR CODE HERE ***\"\n return breadthFirstSearch(problem)\n # util.raiseNotDefined()", "def closest_point(self, point, start_param=None, Ns=25):\n x, z = self.rotate_to_xz_plane(point)\n la = self._closest_point(x, z, start_param, Ns)\n return la", "def check_point(self, mz, rt):\n regions = self.boxes_mz.at(mz)\n hits = set()\n for r in regions:\n if r.data.rt_match(rt):\n hits.add(r.data)\n return hits", "def discover_map(self):\n frontier = Queue()\n cleared = {self.position}\n for pos in self._check_neighbors():\n frontier.put(pos)\n self.add_node(pos, self.position)\n while not frontier.empty():\n next = frontier.get()\n if next not in cleared:\n self.move_to(next)\n for pos in self._check_neighbors():\n self.add_node(pos, self.position)\n frontier.put(pos)\n cleared.add(self.position)\n\n return tuple(self.grid[2])[0]", "def find_nearest_neighbour_from_point(point_cloud:np.ndarray, point:int) -> int:\n pass", "def nearest_point(pt):\n nearest_point = None\n min_dist = float(\"inf\")\n for p in cur_points:\n dist = euclidean_dist(pt, p.to_tuple())\n if dist < min_dist:\n min_dist, nearest_point = dist, p\n\n return nearest_point.to_tuple()", "def getStartPosMapper(seq, subst=None):\n if subst is None:\n subst = make_identity_substitution_matrix(1, -1, alphabet=AALPHABET)\n def findPos(pep):\n d = ssw(pep)\n return int(d['query_begin'] - d['target_begin'])\n \n ssw = StripedSmithWaterman(query_sequence=seq,\n protein=True,\n substitution_matrix=subst)\n return findPos", "def getNearestNode(self, point):\n nodes = list(mm.nodeidx.nearest((point.getPoint().x, point.getPoint().y)))\n return 
self.node_counter__node.get(nodes[0])", "def best_unexplored_lower_bound(self):\n if self._unexplored_nodes:\n return min(node.lower_bound for node in self._unexplored_nodes)\n else:\n return 0.0", "def tinyMazeSearch(problem):\r\n from game import Directions\r\n s = Directions.SOUTH\r\n w = Directions.WEST\r\n return [s,s,w,s,w,w,s,w]", "def unblind_steady_map(\n index, seed, smear=True, local_skymap=False,\n verbose=True\n ):\n \n smear_str = 'smeared/' if smear else 'norm_prob/'\n alert_df = pd.read_csv(f_path + 'icecube_misc/alert_dataframe.csv')\n event_id = alert_df.iloc[index]['Event ID']\n run_id = alert_df.iloc[index]['Run ID']\n # Commented path is the original trials location\n # base_trial_path = '/data/user/apizzuto/fast_response_skylab/' \\\n # + 'alert_event_followup/analysis_trials/'\n base_trial_path = os.path.join(os.path.expandvars(\"$PWD\"), \"analysis_trials/\")\n if not os.path.exists(base_trial_path):\n os.mkdir(base_trial_path)\n if not os.path.exists(base_trial_path + 'results/'):\n os.mkdir(base_trial_path + 'results/')\n if not os.path.exists(base_trial_path + 'results/' + smear_str):\n os.mkdir(base_trial_path + 'results/' + smear_str)\n if not local_skymap:\n outfile = base_trial_path + 'results/' \\\n + '{}index_{}_run_{}_event_{}_steady_seed_{}.pkl'.format(smear_str, index, run_id, event_id, seed)\n else:\n outfile = base_trial_path + 'results/' \\\n + '{}index_{}_run_{}_event_{}_steady_ts_map_seed_{}.pkl'.format(\n smear_str, index, run_id, event_id, seed\n )\n\n t0 = time.time()\n nside = 2**7\n multillh, spatial_prior = config(\n index, gamma = 2.0, seed = seed, scramble = False, nside=nside, \n ncpu = 1, injector = False, verbose=verbose, smear=smear,\n remove = True\n )\n\n t1 = time.time()\n if verbose:\n print('{:.2f} seconds to Initialize Likelihoods'.format(t1 - t0))\n print (\"\\nRunning fit on real data ...\")\n\n allspots = None\n ii = 1\n n_iter = 2 if not local_skymap else 1\n for results, hotspots in multillh.do_allsky_trials(\n n_iter= n_iter, injector=None, nside=nside, rng_seed = 123*seed + ii,\n spatial_prior=spatial_prior, follow_up_factor = 1,\n scramble = False\n ):\n if verbose:\n print('Trial Number: {}'.format(ii))\n ii += 1\n if not local_skymap:\n if allspots is None:\n allspots = {}\n for k, v in hotspots['spatial_prior_0']['best'].items():\n allspots[k] = [v]\n if 'pix' not in allspots.keys():\n allspots['pix'] = [0]\n if 'nside' not in allspots.keys():\n allspots['nside'] = [0]\n else:\n for k, v in hotspots['spatial_prior_0']['best'].items():\n allspots[k].append(v)\n if 'pix' not in hotspots['spatial_prior_0']['best'].keys():\n allspots['pix'].append(0)\n if 'nside' not in hotspots['spatial_prior_0']['best'].keys():\n allspots['nside'].append(0)\n else:\n allspots = results\n\n if local_skymap:\n allspots = allspots[allspots['TS'] != 0.]\n\n dt1 = t1 - t0\n dt = time.time() - t0\n if verbose:\n print(\"Finished script in {} seconds\".format(dt))\n print(\"Initialization: {} seconds\\ntrials: {} seconds\".format(\n dt1, (dt-dt1)\n ))\n\n with open(outfile, 'wb') as f:\n pickle.dump(allspots, f, protocol=pickle.HIGHEST_PROTOCOL)", "def point_random_position(self, point_distribution, mask):\n batch_size = point_distribution.size(0)\n mask_np = to_np(mask) # batch x time\n indices = []\n for i in range(batch_size):\n msk = mask_np[i] # time\n indices.append(np.random.choice(len(msk), 2, p=msk / np.sum(msk, -1)))\n indices = to_pt(np.stack(indices, 0), self.use_cuda) # batch x 2\n return indices", "def select_sensitive_point(self, 
evolver: 'Evolver') -> bool:\r\n\r\n self.sens_sweep_len += 1\r\n\r\n if self.sensitive[1].categorical:\r\n current_bests = dict((k, v.max()) for k,v in self.sens_sweep.items())\r\n evolver.logger.debug('Sensitive %s bests: %s', self.sensitive[1].name, current_bests)\r\n del current_bests\r\n else:\r\n evolver.logger.debug('Sensitive %s | keys=%s, bests=%s', self.sensitive[1].name,\r\n self.sens_sweep_pts[:self.sens_sweep_len],\r\n self.sens_sweep[:self.sens_sweep_len].max(1))\r\n\r\n if self.sens_seed_points_ind < len(self.sens_seed_points):\r\n self.sens_seed_points_ind += 1\r\n if self.sens_seed_points_ind < len(self.sens_seed_points):\r\n self.sens_checking = self.sens_seed_points[self.sens_seed_points_ind]\r\n evolver.logger.info('Sweeping seed %s/%s: %s', self.sens_seed_points_ind+1,\r\n len(self.sens_seed_points), self.sens_checking)\r\n if self.sensitive[1].categorical:\r\n self.sens_sweep[self.sens_checking] = (\r\n np.zeros(evolver.settings.max_trials, dtype='float64'))\r\n return True\r\n\r\n if self.sensitive[1].categorical:\r\n evolver.logger.info('Categorical sensitive and no more seed points -> done')\r\n return False\r\n\r\n self.sens_salient_points_ind = 0\r\n if evolver.settings.salient_points == 0:\r\n return False\r\n else:\r\n self.sens_salient_points_ind += 1\r\n if self.sens_salient_points_ind == evolver.settings.salient_points:\r\n return False\r\n\r\n if self.sensitive[1].integral:\r\n assert self.sens_sweep_pts.dtype == 'int32'\r\n salient_pt = IntegralSalienceComputer.hint(\r\n self.sensitive[1].domain, self.sens_sweep_pts[:self.sens_sweep_len],\r\n self.sens_sweep[:self.sens_sweep_len])\r\n if salient_pt is None:\r\n return False\r\n self.sens_checking = salient_pt\r\n else:\r\n assert self.sens_sweep_pts.dtype == 'float64'\r\n self.sens_checking = ContinuousSalienceComputer.hint(\r\n self.sensitive[1].domain, self.sens_sweep_pts[:self.sens_sweep_len],\r\n self.sens_sweep[:self.sens_sweep_len])\r\n\r\n evolver.logger.info('Sweeping salient %s/%s: %s', self.sens_salient_points_ind+1,\r\n evolver.settings.salient_points, self.sens_checking)\r\n return True" ]
[ "0.59967655", "0.55694646", "0.5480614", "0.5397099", "0.5393073", "0.5385499", "0.5333922", "0.52977884", "0.52768016", "0.5189014", "0.51889", "0.518867", "0.5116351", "0.5088275", "0.5082775", "0.5077057", "0.50351775", "0.50136656", "0.5006063", "0.49916822", "0.49692985", "0.49560353", "0.49290282", "0.490117", "0.4896136", "0.48890227", "0.48712504", "0.485398", "0.48457998", "0.4845772", "0.48434", "0.48276594", "0.48270437", "0.4826387", "0.4822868", "0.4821611", "0.48006767", "0.47970876", "0.47948736", "0.4790604", "0.47840607", "0.4781297", "0.47802114", "0.47801432", "0.4780103", "0.47550336", "0.47550336", "0.4745887", "0.47388205", "0.47387755", "0.4738326", "0.47367793", "0.47290233", "0.4728733", "0.47170535", "0.47064635", "0.4705192", "0.4705065", "0.46880913", "0.4679157", "0.46788117", "0.46784803", "0.4672214", "0.46705306", "0.4667974", "0.46653405", "0.4663575", "0.46618256", "0.4658121", "0.46549368", "0.4649791", "0.46428603", "0.46402013", "0.46369585", "0.4632919", "0.4629203", "0.46267316", "0.46213716", "0.46025628", "0.4599973", "0.4582376", "0.4577458", "0.45728037", "0.45718586", "0.45715222", "0.45710346", "0.45638412", "0.4562939", "0.45620185", "0.45601678", "0.4560163", "0.45532548", "0.45492858", "0.4547951", "0.4543619", "0.45434287", "0.4538371", "0.45269352", "0.452277", "0.45227414" ]
0.7450136
0
Get the seed from the current model. (Depends on work in next_seed to be valid.)
def get_seed(self):
    return self.solver.get_model_trues(start=0, end=self.n)
    # slower:
    # model = self.solver.get_model()
    # return [i for i in range(self.n) if model[i]]
    # slowest:
    # seed = []
    # for i in range(self.n):
    #     if self.solver.model_value(i+1):
    #         seed.add(i)
    # return seed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seed(self):\n return self._seed", "def seed(self):\n return self._seed", "def seed(self):\n return self._seed", "def seed(self) -> int:\n return self._seed # type: ignore", "def next_seed(self):\r\n self._cur_seed += 1\r\n return self._cur_seed - 1", "def run(seed, ModelClass=Model):\n model = ModelClass(random_seed=seed)\n return model.one_trial(1, 10)", "def seed(self): # Property docstring\n\n return self._seed", "def seed(self, seed=None):\n raise NotImplementedError()", "def seed(self, seed=None):\n raise NotImplementedError()", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def get_random_seed(self):\n return self.random_seed", "def _get_next_order_seed(self) -> int | None:\n # Here we have the option to shuffle the trials when specified by the user\n if self._instance_seed_order == \"shuffle\":\n seed = self._rng.randint(0, MAXINT)\n elif self._instance_seed_order == \"shuffle_once\":\n seed = 0\n else:\n seed = None\n\n return seed", "def random(cls, ch_model, seed):\n return super().random(ch_model, seed)", "def seed(self, seed: Optional[int]) -> None:\n ...", "def get_rng(environ, experiment, swabid):\n r = Random()\n r.seed(experiment.seed_strategy(environ, experiment, swabid))\n return r", "def _get_next_seed(cls, i=None):\n if cls._seed_type == SeedType.CONSTANT:\n return cls._seed_generator\n elif cls._seed_type == SeedType.GENERATED:\n return [cls._seed_generator.randint(-MAXRAND, MAXRAND)]\n elif cls._seed_type == SeedType.SPECIFIED:\n try:\n if i is None: \n i = 0\n logger.warning(\"Trying to use specified seed type without specifying index id.\")\n return (cls._seed_generator[i])\n except IndexError:\n raise TypeError(\"Seed type {} ran out of seeds.\".format(cls._seed_type))\n else:\n raise TypeError(\"Seed type {} does not support getting next seed\".format(cls._seed_type))", "def seed(self, seed=None):\n raise self.gym.seed(seed)", "def GetRandomSeed():\n return option['random_seed']", "def seed(self, seed=None):\n if isinstance(seed, list):\n # Support list of seeds as required by Gym.\n seed = seed[0]\n elif isinstance(seed, int):\n pass\n elif seed is not None:\n # If seed is None, we just return current seed.\n raise ValueError(\"Seed must be an integer.\")\n\n if seed is not None:\n self._last_seed = seed\n self._random_state.seed(seed)\n self.action_space.seed(seed)\n\n # Return list of seeds to conform to Gym specs\n return [self._last_seed]", "def random(cls, ch_model, seed, external_diffs=None):\n return super().random(ch_model, seed, external_diffs)", "def get_seed(self, seed_path):\n try:\n infile = open(seed_path, \"rb\")\n obj = pickle.load(infile)\n if not isinstance(obj, Population):\n s = \"Seed import failed: {} does not hold a Population\\\n object.\".format(seed_path)\n self.abort(TypeError, s)\n infile.close()\n return obj\n except IOError:\n s = \"Seed import failed: no file or directory under {}\".format(seed_path)\n self.abort(IOError, s)", "def choose_new_seed(self):\r\n if self.rerandomize == 'never':\r\n self.seed = 1\r\n elif self.rerandomize == \"per_student\" and hasattr(self.runtime, 'seed'):\r\n # see comment on randomization_bin\r\n self.seed = randomization_bin(self.runtime.seed, unicode(self.location).encode('utf-8'))\r\n else:\r\n self.seed = struct.unpack('i', os.urandom(4))[0]\r\n\r\n # So that sandboxed code execution can be cached, but still have an interesting\r\n # number of possibilities, cap the number of different random seeds.\r\n self.seed %= MAX_RANDOMIZATION_BINS", "def 
get_seed(seed=None):\n # https://groups.google.com/forum/#!topic/briansupport/9ErDidIBBFM\n random = np.random.RandomState(seed)\n return random.randint(0, 2147483647)", "def random(cls, cipher_ch_model, seed):\n return super().random(cipher_ch_model, seed)", "def _use_seed(seed):\n torch_rng_state = torch.random.get_rng_state()\n torch.manual_seed(seed)\n yield\n torch.random.set_rng_state(torch_rng_state)", "def get(self, *, name: types.TSeedName) -> types.TSeedValue:\n if not (self._base_path / self._get_file_name(name)).exists():\n raise exceptions.SeedNotFoundError(f\"could not find seed {name}\")\n return (self._base_path / self._get_file_name(name)).read_text()", "def _get_daily_seed(self):\n return int(datetime.now().strftime('%Y%m%d'))", "def seed():", "def get_seq_seed(seq_seeds, simulation_number, sequence_number):\n\n if seq_seeds:\n seq_seed = seq_seeds[simulation_number][sequence_number]\n else:\n seq_seed = None\n return seq_seed", "def seed(self, seed=None):\n #restore a previous state\n if seed is not None: self._seed(seed)\n \n #now generate a new seed and reseed\n seed = self.generate_seed()\n self._seed(seed)", "def random(self):\n try:\n return self.order_by('?')[0]\n except IndexError:\n raise self.model.DoesNotExist", "def generate_seed(self):\n int_info = np.iinfo(np.int64)\n \n return self.rng.randint(int_info.max)", "def seed():\n pass", "def seed():\n pass", "def get_model(self):\n # just return the first model, since all replicas are the same\n return self.call_async(0, '_async_get_model').gen()", "def initLocalBestChoice(self):\n random.seed()\n return", "def seed(seed: int) -> None:\n ...", "def find_above(self, seed):\n superset_exists = self.solver.solve((i + 1) for i in seed)\n if superset_exists:\n return self.get_seed()\n else:\n return None", "def target_dummy(config: Configuration, seed: int) -> int:\n return seed", "def local_seed(self) -> str:\n assert self.definition.settings.sp_root_dir\n seed_file = self.definition.settings.sp_root_dir.joinpath(\"seed.txt\")\n if not seed_file.exists():\n seed = str(encode_hex(bytes(random.randint(0, 255) for _ in range(20))))\n seed_file.write_text(seed)\n else:\n seed = seed_file.read_text().strip()\n return seed", "def fetch_seed(pseed=None):\r\n\r\n seed = pseed or config.unittests.rseed\r\n if seed == 'random':\r\n seed = None\r\n\r\n try:\r\n if seed:\r\n seed = int(seed)\r\n else:\r\n seed = None\r\n except ValueError:\r\n print >> sys.stderr, ('Error: config.unittests.rseed contains '\r\n 'invalid seed, using None instead')\r\n seed = None\r\n\r\n return seed", "def seed(self, seed=None):\r\n if seed is None:\r\n seed = self.default_seed\r\n #backport\r\n #seed = self.default_seed if seed is None else seed\r\n seedgen = numpy.random.RandomState(seed)\r\n for old_r, new_r in self.random_streams.random_state_variables:\r\n old_r_seed = seedgen.randint(2 ** 30)\r\n old_r_container = self.memo[old_r].value\r\n if old_r_container.value is None:\r\n #the cast to int here makes it work on 32bit machines,\r\n #not sure why\r\n old_r_container.value = numpy.random.RandomState(\r\n int(old_r_seed))\r\n else:\r\n #the cast to int here makes it work on 32bit machines,\r\n #not sure why\r\n old_r_container.value.seed(int(old_r_seed))", "def seed(self, seed=None):\n if seed is not None:\n self._rng.seed(seed)", "def seed_rng(self, seed):\n if self._client_state is not None:\n raise RuntimeError(\n f\"{self.__class__.__name__} does not support reseeding an instantiated client\"\n )\n self.seed = seed", "def set_seed(self, 
seed: int):\n self.rsimulator.set_seed(seed)\n # Maybe call new game here?", "def get_sample(self, seed):\n transformed = self.sampler_.get_sample(seed)\n return self.pca_.inverse_transform(transformed)", "def set_seed(self, seed):\n self.seed = seed", "def seed(seed: int):\n # all sampling is actually happening in the move_cube module\n move_cube.seed(seed)", "def _seed(self, seed=None):\n self.rng, seed = seeding.np_random(seed)\n return [seed]", "def generate_seeds_and_models(args, synced_model, env):\n np.random.seed()\n random_seed = np.random.randint(2**30)\n two_models = perturb_model(args, synced_model, random_seed, env)\n return random_seed, two_models", "def seed(self, seed=None):\n # to have a different environment at each time (resolve python random problem)\n self.np_random, seed1 = seeding.np_random(seed)\n seed2 = seeding.hash_seed(seed1 + 1) % 2 ** 31\n return [seed1, seed2]", "def get_rng(seed=None):\n return np.random.default_rng(seed)", "def stellar_seed(self) -> str:\n return kin_utils.encode_check('seed', bytes(self._signing_key)).decode()", "def seed_worker(_worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def run(self, seed=None):\n if seed is not None:\n random_seed.set_seed(seed)\n self.reset()", "def get_generator_trained_model(self):\n return Model(self.model.inputs[0], self.model.layers[1](self.model.inputs[0]))", "def worker_init_fn(worker_id):\r\n base_seed = torch.IntTensor(1).random_().item()\r\n #print(worker_id, base_seed)\r\n np.random.seed(base_seed + worker_id)", "def random_seed(seed):\n state = RandomState()\n random.seed(seed) # alter state\n np.random.seed(seed)\n torch.manual_seed(seed)\n yield\n state.set_global()", "def seed(self, seed: int) -> None:\n self.game.set_seed(seed)", "def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def set_torch_seed(self):\n torch.manual_seed(42)", "def seed(self, evolver: 'Evolver'):\r\n settings = evolver.settings\r\n\r\n self.seed_points_ind = 0\r\n self.sweep_len = 0\r\n if self.evolve_param.categorical:\r\n self.seed_points = self.evolve_param.sample(\r\n evolver.approach_params[self.evolve_param.name], settings.seed_points)\r\n self.sweep_pts = None\r\n self.sweep = dict()\r\n else:\r\n self.seed_points = self.evolve_param.get_seeds(\r\n evolver.approach_params[self.evolve_param.name], settings.seed_points)\r\n self.sweep_pts = np.zeros(\r\n settings.seed_points + settings.salient_points, dtype=self.seed_points.dtype)\r\n self.sweep = np.zeros(self.sweep_pts.shape[0], dtype='float64')\r\n\r\n self.new_sensitives = evolver.sensitive_params.copy()", "def set_torch_seed(seed):\n rng = np.random.RandomState(seed=seed)\n torch_seed = rng.randint(0, 999999)\n torch.manual_seed(seed=torch_seed)\n\n return rng", "def worker_init_reset_seed(worker_id: int):\n initial_seed = torch.initial_seed() % 2**31\n seed_all_rng(initial_seed + worker_id)", "def random_seed(self) -> None:\n self.seed = random.SeedSequence().entropy", "def get_graph_seed(g=None):\n # fixme\n g = g or tf.get_default_graph()\n if g._seed is None:\n g._seed = 1004\n return g._seed", "def seed(self, seed=None):\r\n if seed is None:\r\n seed = self.default_instance_seed\r\n\r\n seedgen = numpy.random.RandomState(seed)\r\n for old_r, new_r in self.state_updates:\r\n old_r_seed = seedgen.randint(2 ** 30)\r\n old_r.set_value(numpy.random.RandomState(int(old_r_seed)),\r\n borrow=True)", "def reset(self, 
seed=None):\n super().reset(seed)\n return self", "def _seed(self, seed):\n self.world.seed(seed)", "def NewRndSeed(ss):\n ss.RndSeed = int(datetime.now(timezone.utc).timestamp())", "def get_rng(obj=None):\n seed = (id(obj) + os.getpid() +\n int(datetime.now().strftime(\"%Y%m%d%H%M%S%f\"))) % 4294967295\n if _RNG_SEED is not None:\n seed = _RNG_SEED\n return random.Random(seed)", "def get_random(self):\n return self._get_random()", "def seed(self, seed=None):\n self.np_random, seed = gym.utils.seeding.np_random(seed)\n return [seed]", "def seed_model(model):\n if model == 'all':\n seed_all()\n elif model == 'client':\n seed_client()\n elif model == 'comment':\n seed_comment()\n elif model == 'staff':\n seed_staff()\n elif model == 'request':\n seed_request()", "def seed_random_state(seed):\n if (seed is None) or (isinstance(seed, int)):\n return np.random.RandomState(seed)\n elif isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError(\"%r can not be used to generate numpy.random.RandomState\"\n \" instance\" % seed)", "def saveseed(self, seed):\n savefile = gettempdir() + '/last_test_seed_fate.tmp'\n if args.verbose:\n print('Saving run into ' + savefile)\n with open(savefile, 'w') as f:\n f.write(str(seed))", "def check_manual_seed(seed: int = None):\n seed = seed or random.randint(1, 10000)\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n return seed", "def random_seed(i): # -> None:\n ...", "def set_rand_seed(self, idx):\n random.seed(self.base_seed + self.epoch + idx // 2)", "def seed(self, seed: Optional[int] = None) -> Sequence[Union[None, int]]:\n if seed is None:\n # To ensure that subprocesses have different seeds,\n # we still populate the seed variable when no argument is passed\n seed = int(np.random.randint(0, np.iinfo(np.uint32).max, dtype=np.uint32))\n\n self._seeds = [seed + idx for idx in range(self.num_envs)]\n return self._seeds", "def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]", "def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]", "def setRandomSeed(self, seed):\n return self._set(randomSeed=seed)", "def setRandomSeed(self, seed):\n return self._set(randomSeed=seed)", "def setRandomSeed(self, seed):\n return self._set(randomSeed=seed)", "def setRandomSeed(self, seed):\n return self._set(randomSeed=seed)", "def setRandomSeed(self, seed):\n return self._set(randomSeed=seed)", "def seed_everything(seed=None) -> int:\n max_seed_value = np.iinfo(np.uint32).max\n min_seed_value = np.iinfo(np.uint32).min\n\n try:\n if seed is None:\n seed = _select_seed_randomly(min_seed_value, max_seed_value)\n else:\n seed = int(seed)\n except (TypeError, ValueError):\n seed = _select_seed_randomly(min_seed_value, max_seed_value)\n\n if (seed > max_seed_value) or (seed < min_seed_value):\n log.warning(\n f\"{seed} is not in bounds, \\\n numpy accepts from {min_seed_value} to {max_seed_value}\"\n )\n seed = _select_seed_randomly(min_seed_value, max_seed_value)\n\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n return seed", "def sample(self, seed=None):\n raise NotImplementedError()", "def run_seeds(self, nbrun):\n self._check_status(STATUS_IDLE)\n self._set_status(STATUS_RUNNING_SEEDS)\n self._notify_listeners_start_operation(listener.OPERATION_RUN_SEEDS)\n rsol = self.agent.run_seeds(nbrun)\n self._set_status(STATUS_IDLE)\n self._notify_listeners_end_operation()\n return rsol", "def seed():\n if 
User.find_by_identity(app.config['SEED_ADMIN_EMAIL']) is not None:\n return None\n\n user = User(\n role = 'admin',\n email = app.config['SEED_ADMIN_EMAIL'],\n password = app.config['SEED_ADMIN_PASSWORD']\n )\n category = Category(\n name='Red Blend',\n description='',\n parent_id=0,\n owner=1\n )\n region = Region(\n name='Columbia Valley',\n description='',\n parent_id=0,\n country='United States',\n state='Washington',\n owner=1\n )\n wine = Wine(\n name='Test Wine',\n maker='Test Maker',\n vintage='2000',\n category=1,\n region=1,\n owner=1\n )\n\n db.session.add(user)\n db.session.commit()\n db.session.add(category)\n db.session.commit()\n db.session.add(region)\n db.session.commit()\n db.session.add(wine)\n db.session.commit()\n\n return user", "def seed(self, seed=None):\n\n # Error check\n if not isinstance(seed, int) and seed is not None:\n raise ValueError('Specified seed must be integer or None')\n\n # Set seed & random number generator\n self.rng_seed = seed\n self.rng = np.random.RandomState(seed)\n\n return", "def tracking_generation_seed():\n return 112", "def seed_rng(self, seed: int | Sequence[int] | None) -> None:\n super().seed_rng(seed)\n self.rng = numpy.random.RandomState(seed)", "def seeds(self) -> Collection[Tuple[str, int]]:\n return self._seeds", "def initialize(self, seed=None):\r\n self.seed(seed)", "def seed_rng(pseed=None):\r\n\r\n seed = fetch_seed(pseed)\r\n if pseed and pseed != seed:\r\n print >> sys.stderr, 'Warning: using seed given by config.unittests.rseed=%i'\\\r\n 'instead of seed %i given as parameter' % (seed, pseed)\r\n numpy.random.seed(seed)\r\n return seed", "def handleSeeding(self):\n seeding = getattr(self.jobBag, \"seeding\", None)\n self.logger.info(\"Job seeding set to: %s\", seeding)\n if seeding == \"ReproducibleSeeding\":\n randService = self.process.RandomNumberGeneratorService\n tweak = PSetTweak()\n for x in randService:\n parameter = \"process.RandomNumberGeneratorService.%s.initialSeed\" % x._internal_name\n tweak.addParameter(parameter, x.initialSeed)\n applyTweak(self.process, tweak, self.fixupDict)\n else:\n if hasattr(self.process, \"RandomNumberGeneratorService\"):\n from IOMC.RandomEngine.RandomServiceHelper import RandomNumberServiceHelper\n helper = RandomNumberServiceHelper(self.process.RandomNumberGeneratorService)\n helper.populate()\n return", "def set_seed(seed):\n assert (type(seed) == int and seed >= 0)\n return params_func(cmd, \"set_seed\", params=[seed])", "def seed (self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]" ]
[ "0.72466826", "0.72466826", "0.72466826", "0.6889038", "0.6776114", "0.67040414", "0.66961634", "0.63759416", "0.63759416", "0.6318605", "0.62863636", "0.62448657", "0.6167233", "0.6144767", "0.61321104", "0.61201894", "0.6057582", "0.603661", "0.59608483", "0.595875", "0.5856872", "0.5817192", "0.57598746", "0.5753094", "0.57499146", "0.574159", "0.57367104", "0.5684261", "0.56622475", "0.56587934", "0.5651847", "0.56439114", "0.5597177", "0.5597177", "0.5592731", "0.55539286", "0.55472475", "0.5533529", "0.55185914", "0.5499211", "0.5492779", "0.54906297", "0.5484209", "0.54454976", "0.5435886", "0.5427135", "0.5414161", "0.5411206", "0.53971606", "0.5395883", "0.53873354", "0.53615683", "0.5320386", "0.53192246", "0.53134346", "0.5305692", "0.5287453", "0.52736884", "0.5272913", "0.5259479", "0.5255731", "0.52542573", "0.5246801", "0.5245398", "0.5244186", "0.5239301", "0.5237189", "0.5221786", "0.52139616", "0.5213607", "0.51924855", "0.51626724", "0.51519966", "0.5148361", "0.5148095", "0.5140532", "0.51351315", "0.5114949", "0.51124257", "0.5098346", "0.50965226", "0.50965226", "0.50918454", "0.50918454", "0.50918454", "0.50918454", "0.50918454", "0.50870353", "0.508541", "0.50708884", "0.50657284", "0.506412", "0.5061856", "0.5052375", "0.5044587", "0.50382787", "0.5030247", "0.50215507", "0.5014426", "0.5004825" ]
0.68926764
3
Maximize a given seed within the current set of constraints. The Boolean direction parameter specifies up (True) or down (False).
def maximize_seed(self, seed, direction):
    while True:
        comp = self.complement(seed)
        x = self.solver.new_var() + 1
        if direction:
            # search for a solution w/ all of the current seed plus at
            # least one from the current complement.
            self.solver.add_clause([-x] + [i + 1 for i in comp])  # temporary clause
            # activate the temporary clause and all seed clauses
            havenew = self.solver.solve([x] + [i + 1 for i in seed])
        else:
            # search for a solution w/ none of current complement and at
            # least one from the current seed removed.
            self.solver.add_clause([-x] + [-(i + 1) for i in seed])  # temporary clause
            # activate the temporary clause and deactivate complement clauses
            havenew = self.solver.solve([x] + [-(i + 1) for i in comp])
        self.solver.add_clause([-x])  # remove the temporary clause
        if havenew:
            seed = self.get_seed()
        else:
            return seed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maximize(self):\n raise NotImplementedError", "def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]", "def _maximize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass", "def maximize(self, budget, optimizer):\n\n\t\tpass", "def move(self):\n self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)", "def _move_satisfy_random_constraint(self):\n secure_random = random.SystemRandom()\n done = False\n while not done:\n c = secure_random.choice(self.constraints)\n if self._is_constraint_violated(c):\n done = True\n # swap 2 wizards to move closer\n self._swap_wizards(c[random.randint(0, 1)], c[2])\n # with probability 0.5, swap the two border wizards\n if random.randint(0, 1) == 1:\n self._swap_wizards(c[0], c[1])\n if not done: print(\"Nothing to do...\")", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def update_heuristic(self):\n self.heuristic = self.manhattan_distance()", "def progressive_deepening(state, 
heuristic_fn=always_zero, depth_limit=INF,\n maximize=True) :\n anytime_value = AnytimeValue() # TA Note: Use this to store values.\n depth = 0\n while depth<=depth_limit-1:\n depth+=1\n best_option=minimax_search_alphabeta(state,-INF,INF, heuristic_fn=heuristic_fn,depth_limit=depth, maximize=True)\n anytime_value.set_value(best_option)\n return anytime_value", "def optimize_library_descent(self, target, direction='max', steps=100,\n multiprocessing=False, ret_info=False,\n args=None):\n # get the target function to call\n target_function = getattr(self, target)\n if args is not None:\n target_function = functools.partial(target_function, **args)\n\n # initialize the optimizer\n value = target_function()\n value_best, state_best = value, self.sens_mat.copy()\n \n if ret_info:\n # store extra information\n start_time = time.time()\n info = {'values': {}}\n values_count = self.parameters['optimizer_values_count']\n values_step = max(1, steps // values_count)\n \n if multiprocessing:\n # run the calculations in multiple processes\n pool_size = self.get_number_of_cores()\n pool = mp.Pool(processes=pool_size)\n if ret_info:\n values_step = max(1, values_step // pool_size)\n \n # iterate for given number of steps\n for step in range(int(steps) // pool_size):\n joblist = []\n init_arguments = self.init_arguments\n for _ in range(pool_size):\n # modify the current state and add it to the job list\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n params = init_arguments['parameters'] \n params['sensitivity_matrix'] = self.sens_mat\n params['initialize_state']['sensitivity'] = 'exact'\n \n joblist.append((copy.deepcopy(init_arguments), target))\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # run all the jobs\n results = pool.map(_run_job, joblist)\n \n # find the best result \n if direction == 'max':\n res_best = np.argmax(results)\n if results[res_best] > value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n elif direction == 'min':\n res_best = np.argmin(results)\n if results[res_best] < value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n else:\n raise ValueError('Unsupported direction `%s`' % direction)\n \n if ret_info and step % values_step == 0:\n info['values'][step * pool_size] = results[res_best]\n \n else:\n # run the calculations in this process\n for step in range(int(steps)):\n # modify the current state\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # get the value of the new state\n value = target_function()\n \n improved = ((direction == 'max' and value > value_best) or\n (direction == 'min' and value < value_best))\n if improved:\n # save the state as the new best value\n value_best, state_best = value, self.sens_mat.copy()\n else:\n # undo last change\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n if ret_info and step % values_step == 0:\n info['values'][step] = value_best\n\n # sort the best state and store it in the current object\n state_best = self.sort_sensitivity_matrix(state_best)\n self.sens_mat = state_best.copy()\n\n if ret_info:\n info['total_time'] = time.time() - start_time \n info['states_considered'] = steps\n info['performance'] = steps / 
info['total_time']\n return value_best, state_best, info\n else:\n return value_best, state_best", "def optimize_library_anneal(self, target, direction='max', steps=100,\n ret_info=False, args=None):\n # lazy import\n from .optimizer import ReceptorOptimizerAnnealer # @UnresolvedImport\n \n # prepare the class that manages the simulated annealing\n annealer = ReceptorOptimizerAnnealer(self, target, direction, args,\n ret_info=ret_info)\n annealer.steps = int(steps)\n annealer.Tmax = self.parameters['anneal_Tmax']\n annealer.Tmin = self.parameters['anneal_Tmin']\n if self.parameters['verbosity'] == 0:\n annealer.updates = 0\n\n # do the optimization\n MI, state = annealer.optimize()\n\n # sort the best state and store it in the current object\n state = self.sort_sensitivity_matrix(state)\n self.sens_mat = state.copy()\n \n if ret_info:\n return MI, state, annealer.info\n else:\n return MI, state", "def ai_move():\n\tinitial_state = map(get_filled_edges, rects)\n\tpossible_moves = []\n\tfor index, filled_edges in enumerate(initial_state):\n\t\tif filled_edges == 0:\n\t\t\tpossible_moves.extend([(index, i) for i in 'ltrb'])\n\t\telif filled_edges == 1:\n\t\t\tpossible_moves.extend(one_filled_edge(index))\n\t\telif filled_edges == 2:\n\t\t\tpossible_moves.extend(two_filled_edge(index))\n\t\telif filled_edges == 3:\n\t\t\tpossible_moves.extend(three_filled_edge(index))\n\tprint possible_moves\n\tpossible_decisions = []\n\tfor move in possible_moves:\n\t\tfinal_state = apply_move(move)\n\t\tpossible_decisions.append(is_feasible(initial_state, final_state))\n\tprint possible_decisions\n\t# randomizing when some decisions have the same weight\n\tmax_weight = max(possible_decisions)\n\t# list of indices which have the same weight\n\tmax_indices = []\n\tfor index, weight in enumerate(possible_decisions):\n\t\tif weight == max_weight:\n\t\t\tmax_indices.append(index)\n\tx = choice(max_indices)\n\tprint x\n\treturn possible_moves[x]\n\t# return possible_moves[possible_decisions.index(max(possible_decisions))]", "def expand(self, right=0, down=0, left=0, up=0):\n self.min_col -= left\n self.min_row -= up\n self.max_col += right\n self.max_row += down", "def mod_space_opt(\n *,\n space,\n dryness_method,\n fuel_build_up_method,\n include_temperature,\n discrete_params,\n defaults=None,\n basinhopping_options=None,\n minimizer_options=None,\n mode=\"basinhopping\",\n x0=None,\n):\n to_optimise = gen_to_optimise(\n fail_func=fail_func,\n success_func=success_func,\n # Init (data) params.\n dryness_method=dryness_method,\n fuel_build_up_method=fuel_build_up_method,\n include_temperature=include_temperature,\n _uncached_data=False,\n **discrete_params,\n )\n\n defaults_dict = defaults if defaults is not None else {}\n\n def to_optimise_with_discrete(x):\n return to_optimise(\n **space.inv_map_float_to_0_1(dict(zip(space.continuous_param_names, x))),\n **defaults_dict,\n )\n\n def basinhopping_callback(x, f, accept):\n # NOTE: Parameters recorded here are authoritative, since hyperopt will not\n # properly report values modified as in e.g. 
`mod_quniform`.\n values = {\n **space.inv_map_float_to_0_1(dict(zip(space.continuous_param_names, x))),\n **discrete_params,\n **defaults_dict,\n }\n values[\"dryness_method\"] = dryness_method\n values[\"fuel_build_up_method\"] = fuel_build_up_method\n values[\"include_temperature\"] = include_temperature\n\n minimizer_options_dict = minimizer_options if minimizer_options is not None else {}\n basinhopping_options_dict = (\n basinhopping_options if basinhopping_options is not None else {}\n )\n\n if x0 is None:\n x0 = space.continuous_x0_mid\n\n if mode == \"basinhopping\":\n res = basinhopping(\n to_optimise_with_discrete,\n x0=x0,\n seed=0,\n callback=basinhopping_callback,\n take_step=BoundedSteps(\n stepsize=0.3, rng=np.random.default_rng(0), verbose=True\n ),\n **{\n \"disp\": True,\n \"minimizer_kwargs\": dict(\n method=\"L-BFGS-B\",\n jac=None,\n bounds=[(0, 1)] * len(space.continuous_param_names),\n options={\n \"maxiter\": 60,\n \"ftol\": 1e-5,\n \"eps\": 1e-3,\n **minimizer_options_dict,\n },\n ),\n \"T\": 0.05,\n \"niter\": 100,\n \"niter_success\": 15,\n **basinhopping_options_dict,\n },\n )\n elif mode == \"minimize\":\n res = minimize(\n to_optimise_with_discrete,\n x0=x0,\n method=\"L-BFGS-B\",\n jac=None,\n bounds=[(0, 1)] * len(space.continuous_param_names),\n options={\n \"maxiter\": 60,\n \"ftol\": 1e-5,\n \"eps\": 1e-3,\n **minimizer_options_dict,\n },\n )\n else:\n raise ValueError\n\n return res", "def default_fitness(maximise):\n if maximise:\n return -100000.0\n else:\n return 100000.0", "def greedy_MAP_assignment(theta,random_runs = 10,heur = 'first'):\r\n N = theta.shape[0]\r\n scipy.random.seed()\r\n max_p = -scipy.inf\r\n for k in range(random_runs):\r\n A = scipy.random.randint(2,size = N)\r\n improved = True\r\n p = A.dot( theta.dot(A) )\r\n while improved:\r\n improved = False\r\n if heur == 'first':\r\n p2 = -scipy.inf\r\n perm = scipy.random.permutation(N)\r\n for s in perm:\r\n #dp: change in p if A[i] bit is reversed\r\n dp = (1-2*A[s])*( A.dot(theta[s,:]+ theta[:,s]) ) + theta[s,s]\r\n if dp>0:\r\n p2 = dp\r\n break\r\n\r\n if heur == 'best':\r\n dp = (1-2*A)*( A.dot(theta + theta.T) ) + scipy.diag(theta)\r\n p2,s = dp.max(), dp.argmax()\r\n if p2 > 0:\r\n A[s] = 1-A[s]\r\n improved = True\r\n p += p2\r\n if p>max_p:\r\n greedy_A,max_p = A.copy(),p\r\n return greedy_A.astype(int),max_p", "def greedyOptimize(self, cpoints):\n # the currently best known energy is the current energy\n best_energy = self.totalEnergy(cpoints.values())\n best_before = best_energy\n cpoints_ = cpoints.copy()\n # iterate over each control point in order to find the movement\n # that improves it i.e. 
the snakes overall energy best\n cv = cpoints_.values()\n for i in range(len(cpoints_)):\n best_step = None \n # test all possible steps\n for step in self.step_directions:\n c1 = cpoints_[i]\n # only check a step if it ends within the image bounds\n if self.inImageBound(cpoints_[i] + step):\n # apply the step to the control point\n cpoints_[i] = cpoints_[i] + step\n # compute the new energy\n new = self.totalEnergy(cpoints_.values())\n # check wether it is a true improvement\n if new < best_energy:\n assert new < best_energy\n # update the currently best known energy\n best_energy = new\n best_step = step\n cv = cpoints_.values()\n cpoints_[i] = cpoints_[i] - step\n assert (c1[0], c1[1]) == (cpoints_[i][0], cpoints_[i][1])\n \n # apply the best step to the control point\n if best_step != None:\n cpoints_[i] = cpoints_[i] + best_step\n \n # ensure saneness\n assert np.array_equal(cv, cpoints_.values())\n self.bestenergy_debug = best_energy\n assert best_before >= best_energy, '(%s !>= %s) the optimized energy is not euqal-smaller than the energy before' % (best_before, best_energy)\n assert self.totalEnergy(cpoints_.values()) == best_energy, '(%s != %s) the new calculated energy does not equal the best calculated energy' % (self.totalEnergy(cpoints_.values()), best_energy)\n return cpoints_", "def maximize(self):\n self.abstract_obj.maximize()", "def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])", "def greedy_next_action(self, state):\n max_val = float('-inf')\n if self.verbose:\n cells = []\n max_candidates = {}\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n val = self.q_value((state, (i, j)))\n if val >= max_val:\n max_val = val\n max_move = (i, j)\n max_candidates[max_move] = val\n if self.verbose:\n cells.append('{0:.3f}'.format(val).center(6))\n elif self.verbose:\n cells.append(state[i][j].center(6))\n if self.verbose:\n self.logger.info(BOARD.format(*cells))\n possible_actions = [k for k, v in max_candidates.items() if v == max_val]\n action = random.choice(possible_actions) if len(possible_actions) > 0 else None\n return action", "def move(self, direction, max_height):\n if direction > 0:\n self.y_pos -= self.SPEED\n elif direction < 0:\n self.y_pos += self.SPEED\n\n if self.y_pos >= max_height - 40:\n self.y_pos = max_height - 40", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def action(self, direction: str, baby_position: tuple) -> None:\n assert direction in constants.BABY_MOVEMENTS\n # First move to pick up baby is they are adjacent\n if baby_position[0] == self.position[0]:\n if baby_position[1] == self.position[1] - 1:\n self.position[1] -= 1\n return\n elif baby_position[1] == self.position[1] + 1:\n self.position[1] += 1\n return\n elif baby_position[1] == self.position[1]:\n if baby_position[0] == self.position[0] - 1:\n self.position[0] -= 1\n return\n 
elif baby_position[0] == self.position[0] + 1:\n self.position[0] += 1\n return\n\n # not adjacent\n if np.random.random() < self.movement_probability:\n if self.dumb:\n if direction == \"N\":\n if self.position[0] != 0:\n self.position[0] -= 1\n elif direction == \"E\":\n if self.position[1] != self.board_dimensions[1] - 1:\n self.position[1] += 1\n elif direction == \"S\":\n if self.position[0] != self.board_dimensions[0] - 1:\n self.position[0] += 1\n elif direction == \"W\":\n if self.position[1] != 0:\n self.position[1] -= 1\n else:\n # Find out whether the baby is further away row-wise\n # or column-wise to decide movement\n dad_pos = self.position.copy()\n row_diff = baby_position[0] - dad_pos[0]\n col_diff = baby_position[1] - dad_pos[1]\n # Move in the direction with greatest difference\n if abs(row_diff) > abs(col_diff):\n if row_diff > 0:\n self.position[0] += 1\n else:\n self.position[0] -= 1\n return\n elif abs(row_diff) < abs(col_diff):\n if col_diff > 0:\n self.position[1] += 1\n else:\n self.position[1] -= 1\n return\n elif abs(row_diff) == abs(col_diff):\n if np.random.random() < 0.5:\n if row_diff > 0:\n self.position[0] += 1\n else:\n self.position[0] -= 1\n else:\n if col_diff > 0:\n self.position[1] += 1\n else:\n self.position[1] -= 1", "def move(self, state):\n \n self.depth_limit=1\n self.best_utility=-2\n action=None\n while not self.is_time_up():\n self.terminal=True\n self.cache={}\n action=self.alpha_beta_search(state,0)\n if self.terminal==True:\n break\n self.depth_limit=self.depth_limit+1\n \n return action", "def isFeasible(self):\n if self.function.constraints(self.position[0],self.position[1]) == False:\n self.position = np.array([random.uniform(-50,50), random.uniform(-50,50)]) \n self.velocity = np.array([random.uniform(-1,1), random.uniform(-1,1)])", "def opt_settlement(player, board, gains, goal=\"default\"):\n goal_index = goal_list.get(goal, 0)\n vertex_score = lambda t: vertex_eval(player, board, t[0], gains, goal_index)\n vertex_list = [(v, board.get_vertex_location(v)) for v in range(board.max_vertex+1) \\\n if board.if_can_build(\"settlement\", *(board.get_vertex_location(v)))]\n return max(vertex_list, key = vertex_score, default=(None, None))", "def findHeuristic(self, _, __):\n popSize = 100\n retain = 0.25\n random_select = 0.1\n mutate = 0.1\n\n popList = self.populationList(popSize)\n\n solved = False\n count = 0\n while not solved:\n # evolves current\n popList = (self.evolve(popList, retain, random_select, mutate))\n# print(popList) # for troubleshooting\n for i in popList:\n if (self.fitness(i) == 0):\n print(\"solution: \", i)\n solved = True\n break\n # if plateus at a local minima, then end after 50 generations\n if count >= 50:\n if (self.fitness(i) <= 10):\n print(\"solution: \", i)\n solved = True\n break\n if solved is True:\n break\n print(\"-----------------\")\n\n # will modify mutation, random_select and retain values to help leave a\n # local minima. 
More randomness the longer it takes up to specific points\n if count % 3 == 0:\n if mutate < 0.2:\n mutate += 0.01\n if random_select < 0.3:\n random_select += 0.01\n count += 1\n\n return exit(0)", "def qlearn(self, num_simulations):\n initial_maze_loc = self.maze.location\n for i in range(num_simulations):\n curr_coord = self.maze.location\n new_epsilon = round(1 - (i+1)/num_simulations, 2)\n self.epsilon = new_epsilon if new_epsilon > 0 else self.epsilon\n\n while (self.grid[curr_coord[0]][curr_coord[1]] != 'G' and\n self.grid[curr_coord[0]][curr_coord[1]] != 'E'):\n rand_num = round(random.random(), 2)\n\n move = (0,0)\n if rand_num < self.epsilon: # exploration\n move = random.choice(self.maze.moves())\n else: # exploitation\n possible_moves = self.maze.moves()\n best_next_move_q = 0\n for pmove in possible_moves:\n if (self.qtable[curr_coord[0]+pmove[0]][curr_coord[1]+pmove[1]] >=\n best_next_move_q):\n move = pmove\n best_next_move_q = (\n self.qtable[curr_coord[0]+pmove[0]][curr_coord[1]+pmove[1]])\n\n self.q(curr_coord, move)\n curr_coord = (curr_coord[0]+move[0], curr_coord[1]+move[1])\n self.maze.location = curr_coord\n self.maze.location = initial_maze_loc\n #print(f\"Simulation {i+1} of {num_simulations} complete.\")", "def Maximize(self):\r\n\r\n return self.SetFlag(self.optionMaximized, True)", "def direction_correction(self):\n self.directions.monster = random.uniform(self.directions.monster * self.get_monster_sensitivity(),\n self.directions.monster * (1 + (1 - self.get_monster_sensitivity())))\n self.directions.food = random.uniform(self.directions.food * self.get_food_sensitivity(),\n self.directions.food * (1 + (1 - self.get_food_sensitivity())))\n self.directions.water = random.uniform(self.directions.water * self.get_water_sensitivity(),\n self.directions.water * (1 + (1 - self.get_water_sensitivity())))", "def minimize(A):\n return determinize(reverse(determinize(reverse(A))))", "def maximal_destination_for_passenger(state, problem):\n unsatisfied = [p for p in state.passengers if not (p.is_arrived() or p.onboard)]\n if unsatisfied:\n max_dist = max([p.opt for p in unsatisfied])\n return max_dist\n return 0", "def up(self, target, guess):\r\n return ord(target) < ord(guess)", "def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n current_depth = self.currentState.depth\n found_move = False\n while self.currentState.parent:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n count = self.currentState.nextChildToVisit\n if len(self.currentState.children) > count:\n found_move = True\n break\n if not found_move:\n for all_visited in self.visited.keys():\n all_visited.nextChildToVisit = 0\n current_depth += 1\n if len(self.visited) == 1:\n all_possible_moves = self.gm.getMovables()\n for every_move in all_possible_moves:\n self.gm.makeMove(every_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, every_move)\n new_game_state.parent = self.currentState\n self.visited[new_game_state] = False\n self.currentState.children.append(new_game_state)\n self.gm.reverseMove(every_move)\n while current_depth != self.currentState.depth:\n count = self.currentState.nextChildToVisit\n self.currentState.nextChildToVisit += 1\n if len(self.currentState.children) > count:\n self.currentState = self.currentState.children[count]\n next_move = self.currentState.requiredMovable\n self.gm.makeMove(next_move)\n else:\n found_move = False\n while 
self.currentState.parent:\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n if len(self.currentState.children) > self.currentState.nextChildToVisit:\n found_move = True\n break\n if not found_move:\n return False\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n all_possible_moves = self.gm.getMovables()\n next_depth = current_depth + 1\n for every_move in all_possible_moves:\n self.gm.makeMove(every_move)\n new_game_state = GameState(self.gm.getGameState(), next_depth, every_move)\n if new_game_state not in self.visited:\n self.visited[new_game_state] = False\n new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.gm.reverseMove(every_move)\n return False\n else:\n return True", "def minimal_weight_matching(X, Y, population_size= 12, nr_iterations=50,\n save_best_nr=1, setup_replacement = 'delete-all', norm_order=2, m_rate=0.05):\n # to save best members\n history = np.zeros((nr_iterations, save_best_nr))\n history_avg = np.zeros((nr_iterations, save_best_nr))\n best_tuple = [None, 0]\n # --------------------- Initialization\n population = initial_population(len(X), population_size)\n # todo\n stagnation_counter = 0\n for ite in range(nr_iterations):\n #print(population)\n # --------------------- Evaluation\n # get list of fitness scores for memebrs in population\n fitness_scores = evaluation(X, Y, population, norm_order)\n #print(fitness_scores)\n # get the values of the best save_best_nr members and save it in the\n # history\n maxmax = max(fitness_scores)\n if maxmax > best_tuple[1]:\n best_tuple = population[fitness_scores.index(maxmax)], maxmax\n history[ite] = sorted(fitness_scores)[-save_best_nr:]\n history_avg[ite] = np.mean(fitness_scores)\n # todo: if we dont move for 10 steps, half the mutation rate\n if history[ite] == history[ite-1]:\n stagnation_counter += 1\n if stagnation_counter == 10:\n m_rate *= 0.25\n stagnation_counter = 0\n else:\n stagnation_counter = 0\n if m_rate <= 0.001:\n m_rate = 0.05\n print(\"--- Iteration {} --> Best:[{}] ||| Avg:[{}] ||| Stagnation Level {} ||| MR {}\".format(ite+1, history[ite], history_avg[ite], stagnation_counter, m_rate))\n # --------------------- Selection\n # select members based on roulette_wheel_selection\n #selected_indx = roulette_wheel_selection(fitness_scores, population_size//2)\n selected_indx = simple_selection(fitness_scores, population_size//2)\n # given indexes, get the selecter members (POPULATION_SIZE//2)\n selected_members = population[selected_indx]\n #print(selected_members)\n # shuffle\n np.random.shuffle(selected_members)\n # create empty array to save children in\n children = np.empty((population_size//2, population.shape[1])).astype(int)\n\n # --------------------- Crossover\n for i in range(0, population_size//2, 2):\n # parent one is in selected_members in row 1, parent two in\n # row 2 ...\n off1, off2 = k_point_crossover(selected_members[i], selected_members[i+1])\n\n # --------------------- Mutation\n # save created children in children array\n children[i], children[i+1] = \\\n mutation(off1, p_m=m_rate), mutation(off2, p_m=m_rate)\n\n\n # ---------------------- Replacement\n population = replacement(population, children, mode=setup_replacement, n=children.shape[0],\n based_on_fitness=True, fitness_old=fitness_scores, fitness_new=evaluation(X, Y, children, norm_order)).astype(int)\n\n # add the best to the population at place 1 [not good...]\n population[0] 
= best_tuple[0]\n\n return population, best_tuple, history, history_avg", "def select_roulette(solver, pop, bias=1, minimising=False):\n assert min(c.fitness for c in pop) >= 0\n assert len(pop) > 0\n assert minimising is False # Done to ensure consistency. Check could be moved to solver (roulette can only do maxi)\n\n point = random.uniform(0, sum(c.fitness**bias for c in pop))\n # print('Chose ', point, ' out of ', sum(c.fitness**bias for c in pop))\n for p in pop:\n point -= p.fitness**bias\n if point <= 0:\n break\n return p", "def optimize_library(self, target, method='descent', direction='max',\n **kwargs):\n if method == 'descent':\n return self.optimize_library_descent(target, direction, **kwargs)\n elif method == 'descent_multiple' or method == 'descent-multiple':\n return self.optimize_library_descent_multiple(target, direction,\n **kwargs)\n elif method == 'anneal':\n return self.optimize_library_anneal(target, direction, **kwargs)\n \n else:\n raise ValueError('Unknown optimization method `%s`' % method)", "def fit(self):\n self._minuit_problem.migrad() # run optimizer\n self._status = 0 if self._minuit_problem.migrad_ok() else 1", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n self.visited_states.append(self.currentState.state)\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n # If current state has no children, make children\n if not self.currentState.children:\n for movable_statement in movables:\n # Make the move\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n # print (\"new state \", new_state)\n\n # If the new state hasn't been visited and isn't in the queue then add it as a child and to the queue\n if (new_state not in self.visited_states):\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.visited[new_gs] = True\n self.visited_states.append(new_state)\n self.gs_queue.append(new_gs)\n\n self.gm.reverseMove(movable_statement)\n\n # Return false if no more to explore\n if not self.gs_queue:\n return False\n\n # Revert to state at when current and next start to change\n root_curr = self.currentState\n self.currentState = self.gs_queue.popleft()\n root_new = self.currentState\n\n # Backtrack to when current node and new node start to diverge\n if root_new.depth == root_curr.depth:\n while root_curr.state != root_new.state:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n root_new = root_new.parent\n else:\n while root_curr.requiredMovable:\n self.gm.reverseMove(root_curr.requiredMovable)\n root_curr = root_curr.parent\n\n # Return game master to state that we are exploring\n # Find path between root and current state\n path = []\n currNode = self.currentState\n while currNode != root_curr:\n path.append(currNode.requiredMovable)\n currNode = currNode.parent\n\n # Created backwards path, now make moves from root to 
current state\n path.reverse()\n for movable_statement in path:\n self.gm.makeMove(movable_statement)\n\n return False", "def move(self, direction):\n moved = False\n initial_tiles = self.dir_dic[direction]\n offset = OFFSETS[direction]\n if direction == UP or direction == DOWN:\n bound = self.grid_height\n else:\n bound = self.grid_width\n for tile in initial_tiles:\n temp = [self.get_tile(tile[0] + idx*offset[0], tile[1] + idx*offset[1]) \n for idx in range(bound)]\n temp = merge(temp)\n \n for idx in range(bound):\n row = tile[0] + idx*offset[0]\n col = tile[1] + idx*offset[1]\n if self.get_tile(row, col) != temp[idx]:\n moved = True\n self.set_tile(row, col, temp[idx]) \n if moved:\n self.new_tile()", "def use_manhatten_heur(self):\r\n\t\tdistance = 0\r\n\r\n\t\tfor row in range(self.n):\r\n\t\t\tfor col in range(self.n):\r\n\t\t\t\tintendedX, intendedY = BoardClass.goalTileLocations[self.board[row][col]]\r\n\t\t\t\tdistance += (abs(row - intendedX) + abs(col - intendedY))\r\n\r\n\t\tself.heuristic = distance", "def maximize(self,x0,method='fmin',**kwargs):\n return self._optimize(x0,'max',method,**kwargs)", "def solve(allowed_moves, demo=False, start_pos=0, finish_pos=-1):\r\n matrix = load_values(demo)\r\n start = matrix.get_start(start_pos)\r\n\r\n def is_shorter(node, n_node):\r\n \"\"\" Checks if path is shorter. \"\"\"\r\n return True if node.distance + n_node.value < n_node.distance else False\r\n\r\n def append_neighbours(queue, node):\r\n \"\"\" Adds neighbours to queue. \"\"\"\r\n neighbours = [getattr(matrix, move)(node) for move in allowed_moves]\r\n neighbours = [i for i in neighbours if i]\r\n for n_node in neighbours:\r\n if is_shorter(node, n_node):\r\n n_node.distance = node.distance + n_node.value\r\n heappush(queue, n_node)\r\n return True\r\n\r\n queue = []\r\n heappush(queue, start)\r\n while True:\r\n node = heappop(queue)\r\n node.visited = True\r\n if (finish_pos is not True and node != matrix.get_last(finish_pos)) \\\r\n or (finish_pos is True and \\\r\n node.count % matrix.height != matrix.width - 1):\r\n append_neighbours(queue, node)\r\n else:\r\n break\r\n\r\n return node.distance", "def result(board, legal_moves, max_util, min_util, depth,flag,player,transition_model):\r\n \r\n global max_depth\r\n global nodes_generated\r\n global min_prunes\r\n global max_prunes\r\n global depth_limit\r\n global start_time\r\n \r\n flag = None\r\n if player == -1:\r\n for j in range(0,len(legal_moves)):\r\n move = legal_moves[j]\r\n new_board = camelot_board.Camelot(list(board.white), list(board.black))\r\n for i in range(0,len(new_board.black)):\r\n if new_board.black[i] == move[0]:\r\n new_board.black[i] = move[1]\r\n if len(move) == 3:\r\n flag = move \r\n new_board.white.remove(move[2])\r\n v = max_value(new_board,max_util,min_util,depth+1)\r\n transition_model[j] = v\r\n else:\r\n for j in range(0,len(legal_moves)):\r\n move = legal_moves[j]\r\n new_board = camelot_board.Camelot(list(board.white), list(board.black))\r\n for i in range(0,len(new_board.white)):\r\n if new_board.white[i] == move[0]:\r\n new_board.white[i] = move[1]\r\n if len(move) == 3:\r\n flag = move \r\n new_board.black.remove(move[2])\r\n v = min_value(new_board,max_util,min_util,depth+1)\r\n transition_model[j] = v \r\n\r\n return flag", "def step(self):\n if self.model.schedule.steps < self.model.residential_steps:\n residential_move = True\n else:\n residential_move = False\n\n\n if residential_move:\n # only step the agents if the number considered is not exhausted\n if 
self.model.total_considered < self.model.residential_moves_per_step:\n # move residential\n U_res = self.get_res_satisfaction(self.pos)\n self.model.res_satisfaction.append(U_res)\n\n # print(\"U_res\",U_res)\n if U_res < self.T:\n\n # todo: implement different move schemes, for now only random\n # find all empty places\n # rank them\n # take one with boltzmann probability.\n self.evaluate_move(U_res, school=False)\n\n else:\n self.model.res_happy += 1\n\n self.model.total_considered += 1\n #print(\"considered\",self.model.total_considered)\n\n\n else:\n if self.model.total_considered < self.model.school_moves_per_step:\n # school moves\n # satisfaction in current school\n U = self.get_school_satisfaction(self.school, self.dist_to_school)\n self.model.satisfaction.append(U)\n\n # If unhappy, compared to threshold move:\n if U < self.T:\n #print('unhappy')\n self.evaluate_move(U, school=True)\n\n else:\n self.model.happy += 1\n if self.model.total_considered>0:\n self.model.percent_happy = np.ma(self.model.happy/self.model.total_considered)", "def minimization_steps(ssa):\r\n old = cfg\r\n while True:\r\n null_choice_elim(cfg)\r\n rename_elimination(cfg)\r\n tf = True\r\n for ins,block in cfg._blockmap.items():\r\n if block.body == old._blockmap[ins].body: tf = False \r\n old = cfg\r\n if not tf: break", "def SA(targetMDG):\n hill_climbers = []\n for i in range(NUM_Population):\n hill_climbers.append(SimulatedAnnealing(targetMDG))\n\n completed_climbers = []\n completed_max_climbers = []\n\n # k: int, number of neighbors to be considered\n k = 20\n i = 0\n not_increased = 0\n max_score = 0\n\n while True:\n for climber in hill_climbers[:]:\n result = climber.climb_with_annealing(k, i)\n if not result:\n completed_climbers.append(climber)\n hill_climbers.remove(climber)\n max_completed_climber = SimulatedAnnealing(targetMDG)\n max_completed_climber.result = climber.max_result\n max_completed_climber.update_score()\n completed_max_climbers.append(max_completed_climber)\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n print(\"Iteration \", i, \": \", total_climbers[-1].score)\n\n if total_climbers[-1].score - max_score != 0:\n not_increased = 0\n else:\n not_increased += 1\n\n if len(hill_climbers) == 0 or not_increased == 10:\n break\n i += 1\n max_score = total_climbers[-1].score\n\n total_climbers = hill_climbers + completed_climbers + completed_max_climbers\n total_climbers.sort()\n\n max_climber = total_climbers[-1]\n\n print(\"TurboMQ = \", max_climber.score)\n for c in max_climber.result: # print all clusters which are not singleton\n if 1 != len(c.get_nodes()):\n print(c.get_nodes())\n\n return max_climber.result", "def atomistic_step(self):\n # first minimize in vacuum, in either case, \n # fixes problems with langevin bond deformation.\n self.system.minimize()\n \n if self.system.should_solvate:\n with self.system.solvate() as sol:\n with self.system.minimize(**sol) as mn:\n with self.system.equilibriate(**mn) as eq:\n self.system.md(**eq)\n else:\n self.system.equilibriate()\n self.system.md()", "def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n current_move = False\n current_depth = self.currentState.depth + 1\n list_movables = self.gm.getMovables()\n\n while not current_move:\n count = self.currentState.nextChildToVisit\n if len(list_movables) <= count:\n if not self.currentState.parent:\n return False\n else:\n 
self.gm.reverseMove(self.currentState.requiredMovable)\n list_movables = self.gm.getMovables()\n self.currentState = self.currentState.parent\n current_depth = self.currentState.depth + 1\n continue\n\n next_move = list_movables[count]\n self.gm.makeMove(next_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, next_move)\n if new_game_state in self.visited:\n self.currentState.nextChildToVisit += 1\n self.gm.reverseMove(next_move)\n else:\n self.currentState.nextChildToVisit += 1\n new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.currentState = new_game_state\n current_move = next_move\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n return False\n else:\n return True", "def update_state(state, Y_next):\n if max(Y_next) > state.best_value + 1e-3 * fabs(state.best_value):\n state.success_counter += 1\n state.failure_counter = 0\n else:\n state.success_counter = 0\n state.failure_counter += 1\n\n if state.success_counter == state.success_tolerance: # Expand trust region\n state.length = min(2.0 * state.length, state.length_max)\n state.success_counter = 0\n elif state.failure_counter == state.failure_tolerance: # Shrink trust region\n state.length /= 2.0\n state.failure_counter = 0\n\n state.best_value = max(state.best_value, max(Y_next).item())\n if state.length < state.length_min:\n state.restart_triggered = True\n\n return state", "def move(m, biggest, current, times):\n for zzz in range(times):\n c1 = current.clockwise_cup\n c2 = c1.clockwise_cup\n c3 = c2.clockwise_cup\n c4 = c3.clockwise_cup\n current.clockwise_cup = c4\n result = c4\n destination = current.label - 1\n while destination == c1.label or destination == c2.label or destination == c3.label:\n destination -= 1\n \n if destination <= 0:\n destination = biggest\n at_destination = m[destination]\n c3.clockwise_cup = at_destination.clockwise_cup\n at_destination.clockwise_cup = c1\n current = result\n return result", "def maximize(self):\n return [not self.minimize]", "def ghost_greedy(self,current,ghost,g_pos):\n x, y, pos ,(st,b),start,p_prob = g_pos[ghost]\n node = self.nodes_array[x][y].getNeighborByDirection(pos)\n if st > 0 and b and node is not None:\n st = max(st-1,0)\n return [(1,(node.i,node.j,pos,(st,not b),start,p_prob))]\n\n rpos = Actions.reverseDirection(pos)\n node = self.nodes_array[x][y]\n priority = [Directions.NORTH, Directions.WEST, Directions.SOUTH, Directions.EAST]\n node_neig = [node.up, node.left, node.down, node.right]\n prio = list()\n nodes = list()\n for i in range(len(node_neig)):\n if node_neig[i]:\n prio.append(priority[i])\n nodes.append((node_neig[i].i,node_neig[i].j))\n\n if len(prio) > 1 and rpos != Directions.STOP:\n for i in range(len(prio)):\n if prio[i] == rpos:\n del prio[i]\n del nodes[i]\n break\n\n arg = min if st == 0 else max\n dist_list = [manhattanDistance(pos, (current.i,current.j)) for pos in nodes]\n ptr_val = arg(dist_list)\n pos = []\n st = st - 1 if st - 1 > 0 else 0\n for i in range(len(dist_list)):\n if dist_list[i] == ptr_val:\n if not st:\n pos.append((nodes[i][0],nodes[i][1],prio[i],(0,False),start,p_prob))\n else:\n pos.append((x,y,prio[i],(st,True),start,p_prob))\n\n pos_p = 1./len(pos)\n return [(pos_p,possibility) for possibility in pos]", "def ignore_biasbn(directions):\n for d in directions:\n if d.dim() <= 1:\n d.fill_(0)", "def main():\n\n # The X's represent the boundaries of the maze.\n # Reaching state G results in a reward of +1.\n # 
Reaching state E results in a reward of -1.\n grid = [ # 4 x 3 maze.\n \"XXXXXX\",\n \"X GX\",\n \"X X EX\",\n \"X X\",\n \"XXXXXX\"\n ]\n\n grid2 = [ # 10 x 8 maze.\n \"XXXXXXXXXXXX\",\n \"X X X\",\n \"X X XXXXXX X\",\n \"X EX\",\n \"XX XXXXXX X\",\n \"X X X X\",\n \"X XX XGXX X\",\n \"X XX X X\",\n \"XXXX XX\",\n \"XXXXXXXXXXXX\"\n ]\n\n maze = Maze(grid, (2, 1))\n maze.display()\n\n agent = QAgent(maze)\n agent.qlearn(250)\n path = agent.solve_maze()\n\n while path:\n move = path.pop(0)\n maze = maze.neighbor(move)\n time.sleep(0.50)\n maze.display()\n\n print(\"path: \" + str(path))\n print(\"Q table:\")\n print(agent.get_qtable_str())", "def move(self, direction: str) -> int:\n x, y = self.snake[-1][0] + self.dirs[direction][0], self.snake[-1][1] + self.dirs[direction][1]\n # print(x,y)\n # went out bound\n if x >= self.n or y >= self.m or x < 0 or y < 0:\n return -1\n tail = self.snake.popleft()\n self.snake_set.discard(tail)\n if (x, y) in self.snake_set:\n return -1\n\n if len(self.food) > 0 and (x, y) == (self.food[0][0], self.food[0][1]):\n self.food.popleft()\n self.snake.appendleft(tail)\n self.snake_set.add(tail)\n self.snake.append((x, y))\n self.snake_set.add((x, y))\n\n return len(self.snake) - 1", "def target_m_dqn(model, target_network, states, next_states, actions,rewards, terminals, \n cumulative_gamma,tau,alpha,clip_value_min):\n \n #----------------------------------------\n q_state_values = jax.vmap(target_network, in_axes=(0))(states).q_values\n q_state_values = jnp.squeeze(q_state_values)\n \n next_q_values = jax.vmap(target_network, in_axes=(0))(next_states).q_values\n next_q_values = jnp.squeeze(next_q_values)\n #----------------------------------------\n\n tau_log_pi_next = stable_scaled_log_softmax(next_q_values, tau, axis=1)\n pi_target = stable_softmax(next_q_values,tau, axis=1)\n replay_log_policy = stable_scaled_log_softmax(q_state_values, tau, axis=1)\n\n #----------------------------------------\n \n replay_next_qt_softmax = jnp.sum((next_q_values-tau_log_pi_next)*pi_target,axis=1)\n\n replay_action_one_hot = nn.one_hot(actions, q_state_values.shape[-1])\n tau_log_pi_a = jnp.sum(replay_log_policy * replay_action_one_hot, axis=1)\n\n #a_max=1\n tau_log_pi_a = jnp.clip(tau_log_pi_a, a_min=clip_value_min,a_max=1)\n\n munchausen_term = alpha * tau_log_pi_a\n modified_bellman = (rewards + munchausen_term +cumulative_gamma * replay_next_qt_softmax *\n (1. 
- jnp.float32(terminals)))\n \n return jax.lax.stop_gradient(modified_bellman)", "def _set_reward_value(self, start, end, direction):\n for i in range(self.max_time - 1):\n dir_relation_map = {1: 3, 3: 1, 4: 2, 2: 4}\n self.rewards[start + i][direction] = 1\n self.rewards[end + i][dir_relation_map[direction]] = 1", "def move_down():\n return __maze.move_down()", "def solve(self):\n while self.counter[-1] != len(self.sequences[-1]) + 1:\n basepair = self.generatebasepairs(self.counter) # Get the combination for the current coordination\n moves = self.generatemoves(basepair) # Get all possible ways to get to this current coordination\n\n maxscore = -100000000 # set the maxscore to a value which is always lower than possible scores\n bestmove = None\n\n # FOr each move calculate score\n for move in moves:\n coordinates = self.generatecoordinates(move, self.counter) # generate the origin coordinate for the current move\n score = self.retrievematrixelement(coordinates).score # Get the score at the origin coordinate\n pairs = self.getallpairs(move) # Get all pairs possible for the current move\n scores = [self.scorePair(u) for u in pairs] # Generate scores for all pairs\n newscore = score + sum(scores) # Add generated scores to origin score\n if newscore > maxscore:\n maxscore = newscore\n bestmove = coordinates\n\n self.enterelement(self.counter, Score(bestmove, maxscore))\n self.increase()", "def move(self, direction: Direction):\n def opposite(dir1: Direction, dir2: Direction) -> bool:\n if not isinstance(dir1, Direction) or not isinstance(dir2, Direction):\n raise ValueError(\"This method can compare only directions\")\n if dir1.value[0] == dir2.value[0] == 0 and dir1.value[1] == -dir2.value[1]:\n return True\n elif dir1.value[1] == dir2.value[1] == 0 and dir1.value[0] == -dir2.value[0]:\n return True\n else:\n return False\n\n if not opposite(self.direction, direction):\n new_head = Position(self._head.x + direction.value[0], self._head.y + direction.value[1])\n if new_head not in self.body:\n self.body.insert(0, new_head)\n self._head = new_head\n self.direction = direction\n tail = self.body.pop()\n self.__trace.append(tail)\n else:\n raise ValueError(\"Snake bite itself\")", "def _select_heuristic(self):\n\n # take a sample of rewards from the current prior of heuristics\n sample_rewards = np.random.normal(self.prior_mus, self.prior_sigmas)\n\n # select the heuristic that has the highest reward sample value\n self.best_heuristic_idx = np.argmax(sample_rewards)\n self.best_heuristic = self.heuristics[self.best_heuristic_idx]\n self.heuristic_selection.append(self.best_heuristic_idx)", "def foodHeuristic(state, problem):\n import itertools\n\n\n\n def manhattan(startPosition, targetPosition):\n xy1 = startPosition\n xy2 = targetPosition\n return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n\n position, foodGrid = state\n\n return len(foodGrid.asList())\n #\n # \"\"\"\n # The below algorithm is from:\n # https://stackoverflow.com/questions/9994913/pacman-what-kinds-of-heuristics-are-mainly-used\n #\n # Find real/manhattan distance between two currently furthest fruits in labyrinth - let's call that x.\n # Find real/manhattan distance from current Pacman position to the closer of previous two fruits - let's call that y.\n # Then, answer is just: x + y.\n # The interpretation of this x + y formula could be something like this:\n #\n # x - either way, you will have to travel this distance, at least at the end\n # y - while you are at the some of the two furthest fruits, it's better to 
collect\n # the food that is near to it so you don't have to go back\n # \"\"\"\n # maxFoodPairDistance = 0\n #\n # if len(foodGrid.asList()) >= 2:\n #\n # #calculate manhattan/real distance between each pair of food (all permutations in foodGrid) and find the maximum of them, and\n # #store the pair with max distance in maxFoodPair\n # for foodPair in itertools.permutations(foodGrid.asList(),2):\n # #foodPairDistance = mazeDistance(foodPair[0], foodPair[1], problem.startingGameState)\n # foodPairDistance = manhattan(foodPair[0], foodPair[1])\n # if foodPairDistance >= maxFoodPairDistance:\n # maxFoodPairDistance = foodPairDistance\n # maxFoodPair = foodPair\n #\n # #get the real distance between pacman and nearest food among the max distance food pair we get above. Using real distance instead\n # #of manhattan distance here just to \"reduce\" the number of nodes expand to get additional point. But that's a bit of a cheating\n # #because the mazeDistance function use of breadth First search - which itself is a search with nodes expansion not counted here\n # #minPacmanToFoodDistance = min([mazeDistance(position, foodPosition, problem.startingGameState) for foodPosition in maxFoodPair])\n # minPacmanToFoodDistance = min([manhattan(position, foodPosition) for foodPosition in maxFoodPair])\n #\n # #When only one food left, just return the real distance between pacman and food\n # elif len(foodGrid.asList()) == 1:\n # foodPosition = foodGrid.asList()[0]\n # #minPacmanToFoodDistance = mazeDistance(position, foodPosition, problem.startingGameState)\n # minPacmanToFoodDistance = manhattan(position, foodPosition)\n # else:\n # minPacmanToFoodDistance = 0\n #\n # return minPacmanToFoodDistance + maxFoodPairDistance", "def open_up(self):\n\n self.move(self.__max_step__)", "def shiftAsideMark(state, opp, distDemar):\n dest = None\n while True:\n dest = Vector2D.create_random(low=-1, high=1)\n dest.norm = distDemar\n dest += opp.position\n if state.is_valid_position(dest) and \\\n distance_horizontale(dest, state.my_goal) > 10.+distance_horizontale(opp.position, state.my_goal):\n break\n return goTo(state, dest)", "def find_best_move(state: GameState) -> None:", "def move(self, direction):\n change_check = False\n for tile in self.dir_dict[direction]:\n if direction == UP or direction == DOWN:\n temp_list = []\n for step in range(self.grid_height):\n temp_list.append(self.grid[tile[0] + step * OFFSETS[direction][0]]\n [tile[1] + step * OFFSETS[direction][1]])\n if not temp_list == merge(temp_list):\n change_check = True\n temp_list = merge(temp_list)\n for step in range(self.grid_height):\n self.grid[tile[0] + step * OFFSETS[direction][0]] \\\n [tile[1] + step * OFFSETS[direction][1]] \\\n = temp_list[step]\n if direction == LEFT or direction == RIGHT:\n temp_list = []\n for step in range(self.grid_width):\n temp_list.append(self.grid[tile[0] + step * OFFSETS[direction][0]]\n [tile[1] + step * OFFSETS[direction][1]])\n if not temp_list == merge(temp_list):\n change_check = True\n temp_list = merge(temp_list)\n for step in range(self.grid_width):\n self.grid[tile[0] + step * OFFSETS[direction][0]] \\\n [tile[1] + step * OFFSETS[direction][1]] \\\n = temp_list[step]\n if change_check == True:\n self.new_tile()", "def move_up():\n return __maze.move_up()", "def _optimize(self) -> None:\n\n for i, agent in enumerate(self.agents):\n states, actions, rewards, next_states, dones = self.memory.sample()\n\n actor_next_state = self._agent_states(i, next_states)\n next_actions = torch.cat(\n 
[a.actor_target(actor_next_state) for a in self.agents], 1\n )\n next_q = agent.critic_target(next_states, next_actions).detach()\n target_q = rewards[:, i].view(-1, 1) + self.gamma * next_q * (\n 1 - dones[:, i].view(-1, 1)\n )\n local_q = agent.critic_local(states, actions)\n\n value_loss = agent.loss_fn(local_q, target_q)\n agent.value_optimizer.zero_grad()\n value_loss.backward()\n agent.value_optimizer.step()\n\n local_actions = []\n for i, a in enumerate(self.agents):\n local_states = self._agent_states(i, states)\n local_actions.append(\n a.actor_local(local_states)\n if a == agent\n else a.actor_local(local_states).detach()\n )\n local_actions = torch.cat(local_actions, 1)\n policy_loss = -agent.critic_local(states, local_actions).mean()\n\n agent.policy_optimizer.zero_grad()\n policy_loss.backward()\n agent.policy_optimizer.step()\n\n self._update_target_model(agent.critic_local, agent.critic_target)\n self._update_target_model(agent.actor_local, agent.actor_target)", "def minimax(self, state, agent, parents_positions):\n if termialTest(state):\n return utility(state)\n return self.computeMinimaxScore(state, agent, parents_positions)", "def _find_move_direction(fun,keys:list,params:dict,upper_point:dict,lower_point:dict,move_up:dict)->tuple:\n best_score = np.Inf\n move_space = {key: [False, True] for key in params.keys()}\n\n for move in grid(move_space):\n param = {}\n for key in keys:\n if move[key]:\n param[key] = params[key][upper_point[key]]\n else:\n param[key] = params[key][lower_point[key]]\n score = fun(param)\n if score < best_score:\n move_up = move\n best_score = score\n return (best_score,move_up)", "def constraint(arg: tp.Any) -> bool: # pylint: disable=unused-argument\n return bool(optimizer.parametrization.random_state.rand() > 0.8)", "def __init__(self, direction, max_momentum, current_momentum=None, max_speed=2, current_speed=0, rowing=False):\n self.direction = direction\n self.max_momentum = max_momentum\n if current_momentum is None:\n self.current_momentum = self.max_momentum\n else:\n self.current_momentum = current_momentum\n self.max_speed = max_speed\n self.current_speed = current_speed\n self.rowing = rowing", "def maximizer(evaluate):\n def strategy(player, board):\n def score_move(move):\n return evaluate(player, Othello.make_move(move, player, list(board)))\n return max(Othello.legal_moves(player, board), key=score_move)\n return strategy", "def gopt_max(fun, bounds, n_warmup = 1000, n_local = 10):\n x_best, y_best = gopt_min(lambda x: -fun(x), bounds, n_warmup, n_local)\n return x_best, -y_best", "def adjust(variable, target, d=20):\n if variable>d+target:\n variable -= d\n elif variable<target-d:\n variable += d\n else:\n variable = target\n return variable", "def dfs_maximizing(state) :\n #print state.describe_previous_move()\n global state_evals, path, _path, _score, level, _state;\n\n level+=1\n path.append(state)\n for stt in state.generate_next_states():\n score=0\n agenda.append((stt, level))\n \n if stt.is_game_over():\n state_evals+=1\n score=stt.get_endgame_score()\n if score>_score:\n _score=score\n _path = path[0:]\n _state = stt\n if not agenda:\n\n _path.append(_state)\n return [_path, _score, state_evals];\n else:\n new_state, level=agenda.pop()\n path=path[0:level]\n level-=1\n return dfs_maximizing(new_state)", "def solve(instance, silent=True, max_weight_lower=1,\n max_weight_upper=float('inf'), scoring=\"sink distance\"):\n flow = instance.flow\n k = instance.k\n\n # quit right away if the instance has weight bounds that can't be 
satisfied\n if instance.has_bad_bounds():\n return set()\n\n # if k equals the size of the largest edge cut, the weights are\n # predetermined\n if instance.k == max(len(C) for C in instance.edge_cuts):\n largest_cut = max(instance.edge_cuts, key=len)\n # Important: path weights must be sorted, otherwise our\n # subsequent optimizations will remove this constraint.\n weights = list(sorted(w for _, w in largest_cut))\n return solve_dp(instance, silent=True, guessed_weights=weights)\n\n max_weight = instance.max_weight_bounds[1]\n feasible_weights = list(filter(lambda w: w <= max_weight,\n instance.weights))\n\n if not silent:\n print(instance.weights, feasible_weights)\n\n # figure out whether we get the first or last positions for free\n largest_free = False\n smallest_free = False\n # check largest weight first\n if instance.max_weight_bounds[0] == instance.max_weight_bounds[1]:\n largest_free = True\n largest = instance.max_weight_bounds[0]\n if min(instance.weights) == 1:\n smallest_free = True\n smallest = 1\n\n positions = list(range(int(smallest_free), k-int(largest_free)))\n\n # iterate over the number of unguessed weights\n for diff in range(k+1):\n if not silent:\n print(\"Diff =\", diff)\n # iterate over positions of guessed weights. We want them to be\n # ordered, but choose the smallest first to be removed\n for rev_indices in itertools.combinations(reversed(positions), k-diff):\n indices = list(reversed(rev_indices))\n p = len(indices)\n # when k-1 values are determined, it also determines the kth value\n if p == k-1:\n continue\n # iterate over choices for those guessed weights\n for chosen_weights in itertools.combinations(feasible_weights, p):\n weights = [None] * k\n\n # assign the chosen weights to the guessed positions\n for p, w in zip(indices, chosen_weights):\n weights[p] = w\n\n # add in free values\n if smallest_free:\n weights[0] = smallest\n if largest_free:\n weights[k-1] = largest\n\n # quit if this didn't work\n if not is_feasible(weights, flow, max_weight):\n continue\n\n if not silent:\n print(\"Trying weights\", weights)\n sol = solve_dp(instance, silent=True, guessed_weights=weights)\n if len(sol) > 0:\n if not silent:\n try:\n for s in sol:\n print(s, sum(s.path_weights), flow)\n except AttributeError:\n print(\"Unterdetermined solution\")\n return sol", "def miniMax(self,state,depth=0):\n \n #print(\"NextState (depth \"+str(depth)+\"):\")\n #print(\"Action: \"+state.get_action())\n \n if state in self.__explored:\n return self.__explored[state.get_hashable_state()]\n \n if state.is_end_state() or depth >= (self.__max_depth - 1):\n self.__explored[state.get_hashable_state()] = state.get_utility_value()\n return state.get_utility_value() #Return terminal state's utility value\n \n is_max_turn = state.get_max_turn()\n childList = state.get_successors()\n \n if is_max_turn:\n utility = float(\"-inf\")\n for c in childList:\n utility = max(utility,self.miniMax(c, depth+1))\n self.__explored[state.get_hashable_state()] = utility\n return utility\n else:\n utility = float(\"inf\")\n for c in childList:\n utility = min(utility,self.miniMax(c, depth+1))\n self.__explored[state.get_hashable_state()] = utility\n return utility", "def minimax_decision(gameState):\n value = -sys.maxsize\n best_value = -sys.maxsize\n best_move = None\n legal_moves = gameState.get_legal_moves()\n for move in legal_moves:\n game = gameState.forecast_move(move)\n value = max(value, min_value(game))\n if value > best_value:\n best_value = value\n best_move = move\n return best_move", "def 
action(self, direction: str) -> bool:\n direction = direction[0].upper()\n assert (\n direction in constants.BABY_MOVEMENTS\n ), f\"Movement must be one of {constants.BABY_MOVEMENTS}\"\n if direction == \"R\":\n legal_moves = []\n if self.position[0] != 0:\n legal_moves.append(\"N\")\n if self.position[0] != self.board_dimensions[0] - 1:\n legal_moves.append(\"S\")\n if self.position[1] != 0:\n legal_moves.append(\"W\")\n if self.position[1] != self.board_dimensions[1] - 1:\n legal_moves.append(\"E\")\n direction = np.random.choice(legal_moves)\n if direction == \"N\":\n if self.position[0] != 0:\n self.position[0] -= 1\n return True\n else:\n return False\n elif direction == \"E\":\n if self.position[1] != self.board_dimensions[1] - 1:\n self.position[1] += 1\n return True\n else:\n return False\n elif direction == \"S\":\n if self.position[0] != self.board_dimensions[0] - 1:\n self.position[0] += 1\n return True\n else:\n return False\n elif direction == \"W\":\n if self.position[1] != 0:\n self.position[1] -= 1\n return True\n else:\n return False\n return False", "def make_random_move(self):\n choice = None\n options = []\n #generate full moves list\n for i in range(self.width):\n for j in range(self.height):\n #make sure move has not been made\n if (i,j) not in self.moves_made:\n #make sure move is not a mine\n if (i,j) not in self.mines:\n options.append((i,j))\n #if there are no options, return None\n if len(options) == 0:\n return None\n\n #pick a random option from generated list\n choice = random.choice(options)\n return choice\n\n \"\"\"\n For kicks and giggles I wrote this extra bit to determine a\n rough intuitive probability for each option based on the knowledge\n base, so rather than picking a choice randomly the AI can choose\n the option that is, at least intuitively, least likely to blow up.\n Better to take the 1/8 chance than the 1/3 chance, right?\n \"\"\"\n best_chance = 1\n #iterate through generated options\n for option in options:\n #Could set chance to 1/8, but the AI wouldn't actually know that. I\n #only know it because I can read the code...But for the purposes of this\n #drill we'll say the AI doesn't know how many bombs are placed.\n #Better then to pick a square we know nothing about than one that\n #has a 1/8 chance of exploding. 
Gather more information that way.\n chance = 0\n for sentence in self.knowledge:\n #look to see if current option is in sentences\n if option in sentence.cells:\n #use sentence count and length of cell set to calculate probability\n prob = sentence.count / len(sentence.cells)\n if prob > chance:\n #Looking for the highest explosive probability for this square\n chance = prob\n if chance < best_chance:\n #If this option has lower odds of exploding than current best, it becomes\n #the optimal\n best_chance = chance\n choice = option\n\n #return choice", "def swap(self, direction):\n directions = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1),}\n new_row = self.__blank_box[0] + directions[direction][0]\n new_col = self.__blank_box[1] + directions[direction][1]\n new_position = self.__get_box((new_row*self.__length)+new_col)\n self.__board[self.__blank_box[0]][self.__blank_box[1]] \\\n = new_position\n self.__board[new_row][new_col] = None\n self.__blank_box = (new_row, new_col)\n self.__set_possibilities()\n self.__previous_move = direction", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n while to_move < len(movables):\n # Make the move\n movable_statement = movables[to_move]\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = self.gm.getGameState()\n\n # Find out if this state has already been explored\n visited = False\n for visited_state in self.visited.keys():\n if visited_state.state == new_state:\n visited = True\n\n # If the new state hasn't been visited then add it as a child then move down to this child\n if not visited:\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.currentState = new_gs\n break\n\n # Else skip this state and try going to the next movable statement\n else:\n # print(\"SKIP THIS STATE\")\n self.gm.reverseMove(movable_statement)\n to_move += 1\n\n # Went all the way down to a leaf, backtrack\n if (to_move >= len(movables)):\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n return False", "def backup_negamax(node, reward):\n temp_node = node\n temp_node.N += 1\n temp_node.Q += reward\n for parent, _ in temp_node.parents:\n backup_negamax(parent, -reward)", "def target_mode(self):\r\n if not self.in_target_mode:\r\n # If not in target mode, choose a target\r\n while not inside_polygon(self.target.x,self.target.y, poi):\r\n self.target=PVector(random(-1,1),random(-1,1),0)\r\n self.target.setMag(random_g(20,3))\r\n self.target.add(self.pos)\r\n self.target = random_p([(self.target, 3),(self.coh, 2)])\r\n if self.target.x > field.x:\r\n self.target.x = field.x\r\n elif self.target.x < 0:\r\n self.target.x =0\r\n if self.target.y > field.y:\r\n self.target.y = field.y\r\n elif self.target.y < 0:\r\n self.target.y =0\r\n # print(\"new target\", 
self.target.x, self.target.y)\r\n self.in_target_mode = True\r\n \r\n if PVector.dist(self.pos, self.target) < 3:\r\n self.in_target_mode = False\r\n if self.exiting:\r\n self.status = 3\r\n else:\r\n self.status = random_p([(0,3),(1,6),(2,5)])\r\n else:\r\n self.targeting()", "def _minimize(self, board, possible_actions, depth_limit, alpha, beta):\r\n pass", "def move(self, state):\n result = None\n self.currentDepthLimit = 0\t\n\tself.transposition = {}\n\tself.counter = 0\n\n\twhile True:\n u = float(\"inf\")\n\t v = float(\"-inf\")\n\t self.counter = 0\n\t result = None\n\t self.transposition = {}\n\t for a in state.actions():\n new = self.min_value(state.result(a), float(\"-inf\"), float(\"inf\"),self.currentDepthLimit)\n\t if new > v:\n\t v = new\n\t result = a\n\n\t elif new == v:\n\t if a.index < result.index:\n\t result = a\n\t if self.is_time_up():\n\t return result\n\t \n\t self.currentDepthLimit += 1\n\t \"\"\"If we never use evaluate function, it means all state are terminated, so return whatever the result is\"\"\"\n\t if self.counter == 0:\n\t break\n\t if self.is_time_up():\n \t return result\n\treturn result", "def solveOneStep(self):\n ### Student code goes here\n if self.first_step == False:\n self.first_step = True\n if self.solveOneStep():\n return True\n if self.queue:\n self.gm_init()\n ele = self.queue.get()\n #print (len(ele))\n state = ele[0]\n premoves = ele[1]\n\n for m in premoves:\n self.gm.makeMove(m)\n if state.state == self.victoryCondition:\n return True\n self.visited[state] = True\n print(\"CURRENTSTATE:\")\n print(self.gm.getGameState())\n print(\"*******\")\n moves = self.gm.getMovables()\n for m in moves:\n self.gm.makeMove(m)\n if (((state.parent is not None) and (self.gm.getGameState() == state.parent.state))) or GameState(self.gm.getGameState(), 0, None) in self.visited:\n self.gm.reverseMove(m)\n continue\n self.visited[GameState(self.gm.getGameState(), 0, None)] = True\n new_pmv = [i for i in premoves]\n new_pmv.append(m)\n next_state = GameState(self.gm.getGameState(), state.depth+1, m)\n next_state.parent = state\n state.children.append(next_state)\n self.queue.put([next_state, new_pmv])\n self.gm.reverseMove(m)\n self.currentState = state\n\n #for i in range(len(premoves)-1, -1, -1):\n # mv = premoves[i]\n # self.gm.reverseMove(mv)\n return False", "def move(self, direction):\n\n if direction == \"north\":\n self.go_and_update(-1, 0)\n\n elif direction == \"south\":\n self.go_and_update(1, 0)\n\n elif direction == \"east\":\n self.go_and_update(0, 1)\n\n elif direction == \"west\":\n self.go_and_update(0, -1)", "def expectimax_move(game, method='score'):\n\n if method == 'score':\n def val(g):\n return g[1]\n elif method == 'empty':\n val = empty_squares\n elif method == 'gradient':\n val = gradient_value\n else:\n print('Invalid method given to expectimax function')\n exit(1)\n\n _, move = expectimax(game, 2, val)\n return move", "def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = 
heappop(frontier)\n\n # Expand configuration by appending a vertex to the path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)", "def solve_maze(self):\n initial_maze_loc = self.maze.location\n curr_coord = initial_maze_loc\n solution_path_directions = []\n #print(\"in solve_maze:\")\n\n # The agent always chooses the next location with the highest Q value.\n # With this strategy, the agent aims to reach the goal using the\n # most optimal path possible.\n while (self.grid[curr_coord[0]][curr_coord[1]] != 'G' and\n self.grid[curr_coord[0]][curr_coord[1]] != 'E'):\n possible_moves = self.maze.moves()\n\n # Find the next best move.\n best_next_move = (0,0)\n best_next_move_q = float('-inf')\n for move in possible_moves:\n if self.qtable[curr_coord[0]+move[0]][curr_coord[1]+move[1]] >= best_next_move_q:\n best_next_move = move\n best_next_move_q = self.qtable[curr_coord[0]+move[0]][curr_coord[1]+move[1]]\n\n direction = self.maze.moves_to_dirs[best_next_move]\n solution_path_directions.append(direction)\n curr_coord = (curr_coord[0]+best_next_move[0], curr_coord[1]+best_next_move[1])\n self.maze.location = curr_coord\n self.maze.location = initial_maze_loc # reset maze location to initial coord.\n\n return solution_path_directions", "def MinimizeMode(self, mode):\r\n \r\n self.minimize_mode = mode\r\n return self", "def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n 
if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s", "def constrain(self, minVector=None, maxVector=None, sequential=False, bind=True):\n v = self.vector\n if bind:\n if minVector is not None:\n v = numpy.maximum(minVector, v)\n if maxVector is not None:\n v = numpy.minimum(maxVector, v)\n if sequential:\n v = numpy.sort(v)\n self.vector = v", "def milp(mdp, maxV, zeroConstraints=()):\n m = Model()\n m.setParam('OutputFlag', False)\n\n # convert notation to previous implementation\n S = mdp.S\n A = mdp.A\n R = mdp.rFuncs\n psi = mdp.psi\n T = mdp.T\n alpha = mdp.alpha\n gamma = mdp.gamma\n\n # useful constants\n rLen = len(R)\n M = 10000 # a large number\n Sr = range(len(S))\n Ar = range(len(A))\n\n # decision variables\n x = m.addVars(len(S), len(A), lb=0, name='x')\n z = m.addVars(rLen, vtype=GRB.BINARY, name='z')\n y = m.addVars(rLen, name='y')\n\n # constraints on y\n for i in range(rLen):\n m.addConstr(y[i] <= sum([x[s, a] * R[i](S[s], A[a]) for s in Sr for a in Ar]) - maxV[i] + (1 - z[i]) * M)\n m.addConstr(y[i] <= z[i] * M)\n\n # constraints on x (valid occupancy)\n for sp in Sr:\n m.addConstr(sum(x[s, a] * ((s == sp) - gamma * T(S[s], A[a], S[sp])) for s in Sr for a in Ar) == alpha(S[sp]))\n\n # == constraints\n for consIdx in range(len(zeroConstraints)):\n m.addConstr(sum(x[S.index(s), A.index(a)] for s, a in zeroConstraints[consIdx]) == 0)\n # obj\n m.setObjective(sum([psi[i] * y[i] for i in xrange(rLen)]), GRB.MAXIMIZE)\n\n m.optimize()\n\n pi = {(S[s], A[a]): x[s, a].X for s in Sr for a in Ar}\n\n if m.status == GRB.Status.OPTIMAL:\n # return feasible being true and the obj value, opt pi\n # .X attribute is to retrieve the value of the variable\n return pi\n else:\n # simply return infeasible\n raise Exception('milp problem optimal solution not found' + m.status)", "def minMoves(maze, x, y):\n\n def maze_guard():\n \"\"\"Guard function to block oversized dimensions\"\"\"\n cell_guard = all([1 <= len(row) <= 100 for row in maze])\n 
row_guard = 1 <= len(maze) <= 100\n return cell_guard and row_guard\n\n def walk_maze(finish):\n \"\"\"Walks the maze, finding the shortest path including all coins.\n Finishes when reach the coordenate finish, a tuple with row and\n column numbers\n \"\"\"\n i, j = (0, 0)\n result = -1\n weight = -1\n while nodes:\n i, j, path, coins = nodes.popleft()\n cell = maze[i][j]\n if (i, j) == finish:\n weight, result = check_result(coins, path, weight, result)\n elif cell != 1:\n adjacent_nodes(i, j, path, coins)\n\n return result\n\n def adjacent_nodes(i, j, path, coins):\n \"\"\"Adds the node in positions i, j, with its path added to\n accumulated path. The path is transformed into a binary\n number, i.e, 2 ** (i * n + j), being n the number of rows\n in the maze matrix.\n \"\"\"\n def neighbour(x, y):\n this_path = 2 ** (i * n + j)\n if not this_path & path:\n coin = coins + 1 if maze[i][j] == 2 else coins\n nodes.append((x, y, path + this_path, coin))\n\n coord = [(i + 1, j, i + 1 < n), (i - 1, j, i - 1 >= 0),\n (i, j + 1, j + 1 < m), (i, j - 1, j - 1 >= 0)]\n _ = [neighbour(x, y) for x, y, test in coord if test]\n\n if not maze_guard():\n return -1\n\n n = len(maze)\n m = len(maze[0])\n nodes = deque([(0, 0, 0, 0)])\n return walk_maze((x, y))", "def reward_shaping(self, state_desc):\r\n # Reward for not falling down\r\n reward = 10.0\r\n\r\n yaw = state_desc['joint_pos']['ground_pelvis'][2]\r\n current_v_x, current_v_z = rotate_frame(\r\n state_desc['body_vel']['pelvis'][0],\r\n state_desc['body_vel']['pelvis'][2], yaw)\r\n # leftward\r\n current_v_z = -current_v_z\r\n\r\n # current relative target theta\r\n target_v_x, target_v_z = state_desc['v_tgt_field'][0][5][5], state_desc['v_tgt_field'][1][5][5]\r\n\r\n vel_penalty = np.linalg.norm([target_v_x - current_v_x, target_v_z - current_v_z])\r\n\r\n muscle_penalty = 0\r\n for muscle in sorted(state_desc['muscles'].keys()):\r\n muscle_penalty += np.square(state_desc['muscles'][muscle]['activation'])\r\n\r\n ret_r = reward - (vel_penalty * self.vel_penalty_coeff + muscle_penalty * self.muscle_penalty_coeff) * self.penalty_coeff\r\n return ret_r", "def possible_to_move(self, down=False, left=False, right=False):\n old_coordinate = self.coordinate[:]\n possible = False\n self.move(down, left, right)\n if self.board.is_valid_tetromino(self):\n possible = True\n\n self.coordinate = old_coordinate\n return possible" ]
[ "0.5255628", "0.51518804", "0.50815153", "0.5040505", "0.50042754", "0.49726215", "0.48932496", "0.4887833", "0.486175", "0.48460725", "0.4834822", "0.4807731", "0.48042133", "0.47982746", "0.47937402", "0.4761865", "0.4752347", "0.4741787", "0.47137696", "0.46898678", "0.46607664", "0.46577516", "0.4634375", "0.46226537", "0.46104294", "0.46091238", "0.46086308", "0.4607482", "0.45969924", "0.45945126", "0.45797002", "0.45672306", "0.45608345", "0.45515925", "0.45402956", "0.45341069", "0.45320237", "0.4531954", "0.45273888", "0.45267397", "0.45264912", "0.452469", "0.45205808", "0.45124567", "0.45117545", "0.45107207", "0.45093408", "0.4504862", "0.45044455", "0.4499024", "0.4497421", "0.4492508", "0.44881204", "0.44794232", "0.4477267", "0.44669974", "0.44620112", "0.44602865", "0.44567344", "0.44506115", "0.4447649", "0.44378278", "0.44354907", "0.44336742", "0.44316345", "0.44298506", "0.44218242", "0.44203752", "0.4414028", "0.4410713", "0.44060326", "0.44055596", "0.4397019", "0.43968418", "0.43964747", "0.4394721", "0.43895483", "0.43875566", "0.43858013", "0.43852097", "0.43794757", "0.43775126", "0.43765792", "0.43761194", "0.43757755", "0.43748096", "0.43729916", "0.4372775", "0.43705648", "0.43695605", "0.43652576", "0.43612227", "0.4361074", "0.4356017", "0.43539575", "0.4353087", "0.43529728", "0.43509448", "0.43499854", "0.43466794" ]
0.7806484
0
Return the complement of a given set w.r.t. the set of mapped constraints.
def complement(self, aset): return self.all_n.difference(aset)
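A minimal, runnable sketch of how a method like the one above could be exercised follows; the class name ConstraintStore and the concrete contents of all_n are assumptions made purely for illustration and are not part of the dataset record.

class ConstraintStore:
    def __init__(self, mapped_constraints):
        # all_n holds the full set of mapped constraints (assumed attribute name)
        self.all_n = set(mapped_constraints)

    def complement(self, aset):
        # complement of aset with respect to the mapped constraints
        return self.all_n.difference(aset)

store = ConstraintStore({"c1", "c2", "c3", "c4"})
print(store.complement({"c2", "c4"}))  # remaining constraints, e.g. {'c1', 'c3'}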
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complement(self) -> 'RangeSet':\n return RangeSet(Range()) - self", "def get_complement(seta):\n\n complement_set = set()\n\n for elem in seta:\n new_elem_tuple = (elem[0], float(D('1.0') - D(str(elem[1]))))\n complement_set.add(new_elem_tuple)\n\n return complement_set", "def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)", "def complement(self):\n N = self._size + 1\n new_covers = [[N - i[0], N - i[1]] for i in self._poset.cover_relations_iterator()]\n return TamariIntervalPoset(N - 1, new_covers)", "def only_diff_elements(set_1, set_2):\n return (set_1 ^ set_2)", "def _complement(self):\n assert self._.d == 2, \"the complement is only defined for two classes\"\n kargs = {\"complement\": self}\n if self._has(\"p\"):\n kargs[\"p\"] = self._.p.reorder([0, 2, 1], inplace=False)\n elif self._has(\"q\"):\n kargs[\"q\"] = self._.q.reorder([0, 2, 1], inplace=False)\n elif self._has(\"P\"):\n kargs[\"P\"] = self._.P[[0, 2, 1], [0, 2, 1]]\n elif self._has(\"Q\"):\n kargs[\"Q\"] = self._.Q[[0, 2, 1], [0, 2, 1]]\n return ASParameters(**kargs)", "def symmetric_difference(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> 'RangeSet':\n # convert to a RangeSet\n rng_set = RangeSet._to_rangeset(rng_set)\n # get union and then remove intersections\n union = self.union(rng_set)\n intersection = self.intersection(rng_set)\n union.difference_update(intersection)\n return union", "def complement(self):\n assert self._.d == 2, \"the complement is only defined for two classes\"\n return self._.complement", "def vcf_complement_set_snvs(vcf_input: str, set_negative:set, vcf_output:str):\n with pysam.VariantFile(vcf_input) as input_vcf_handle:\n with pysam.VariantFile(vcf_output,header=input_vcf_handle.header,mode='w') as out:\n for record in input_vcf_handle:\n\n if len(record.ref)!=1:\n continue\n\n if len(record.alts[0])!=1:\n continue\n\n\n if not (record.chrom,record.pos) in set_negative:\n out.write(record)", "def subtract(wb, wl):\n return set(wb) - set(wl)", "def union_of_non_none_sets(sets):\r\n return functools.reduce(lambda x, y: x.union(y), filter(lambda z: z is not\\\r\n None, sets), set())", "def prune_sequence(sequence_set, extended_set):\n tmp_set = set()\n for seq in sequence_set:\n # se una sotto-sequenza e' trovata viene ignorata, altrimenti e' aggiunta al set temporaneo\n found = False\n for ext in extended_set:\n if seq1_in_seq2(seq, ext, 0): # eps e' 0 perche' le sequenze sono identiche\n found = True\n break\n if not found:\n tmp_set.add(seq)\n # alla fine aggiungi tutto il set esteso, si puo' includere nel ciclo precedente\n for ext in extended_set:\n tmp_set.add(ext)\n return tmp_set", "def complement(G):\n\n nset = set(G.nodes())\n n_nodes = G.order()\n n_edges = n_nodes * (n_nodes - 1) - G.size() + 1\n \n cmp_edges = ((u, v) for u in G.nodes()\n\t\t for v in nset - set(G.successors(u)))\n deg = make_deg(n_nodes, cmp_edges)\n cmp_edges = ((u, v) for u in G.nodes()\n\t\t for v in nset - set(G.successors(u)))\n H = make(n_nodes, n_edges, cmp_edges, deg)\n return H", "def __sub__(self, vs):\n return [v for v in self.__elements if tuple(v) not in map(tuple, vs)]", "def complement(self):\n comp = self.__class__(self.name, complement(self.seq),\n start=self.start, end=self.end)\n comp.comp = False if self.comp else True\n return comp", "def unlabeled_set(self):\n # unlabeled set is the query set minus the preselected set\n unlabeled_tag_bitmask = self._query_tag_bitmask - self._preselected_tag_bitmask\n return 
unlabeled_tag_bitmask.masked_select_from_list(\n self.api_workflow_client.filenames_on_server\n )", "def remove_super_sets(sub_set, set_of_sets):\n return [x for x in set_of_sets if not set(x).issuperset(set(sub_set))]", "def apply_to_sets(cls, sets):\n for sq_set in sets:\n sqs_with_val = {}\n sqs_by_bitmask = {}\n for sq in iter(sq_set):\n for sq2 in iter(sq_set):\n if sq2.known_value:\n sq.eliminate(sq2)\n\n pvals = sq.possible_values()\n\n if sq.bitmask not in sqs_by_bitmask:\n sqs_by_bitmask[sq.bitmask] = []\n sqs_by_bitmask[sq.bitmask].append(sq)\n\n for val in pvals:\n if val not in sqs_with_val:\n sqs_with_val[val] = []\n sqs_with_val[val].append(sq)\n\n for val, sqs in sqs_with_val.iteritems():\n if len(sqs) == 1:\n sqs[0].set_value(val)\n\n for bm, sqs in sqs_by_bitmask.iteritems():\n if len(sqs) > 1:\n pvals = list(SudokuSquare.bitmask_to_possible_values(bm))\n if len(sqs) == len(pvals):\n for sq in iter(sq_set):\n if sq not in sqs:\n sq.eliminate(sqs[0])", "def constraint_not_adjacent(m, n) :\n return not constraint_adjacent(m,n)", "def Res(K: Set[CNFClause]) -> Set[CNFClause]:\n K_list = list(K)\n res = set()\n for i in range(len(K_list) - 1):\n for j in range(i + 1, len(K_list)):\n for literal in list(K_list[i].literals):\n if - literal in K_list[j].literals:\n resolute = (K_list[i] | K_list[j]) - CNFClause({literal, -literal})\n if not resolute.is_tautology():\n res.add(resolute)\n break\n return K | res", "def _complement(self, k, p):\n assert self._.d == 2, \"the complement is only defined for two classes\"\n if checkPos(self._.b[0] - self._.c[2]):\n return self._get_class()((k[2], p[2, 2, 1]),\n (Integer(1), p[1, 2, 2]),\n complement=self)\n else:\n return ASParameters._complement(self)", "def notInSet(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(rhs, Set)\n return lhs not in rhs", "def _filter_dict(src_dict, key_set):\n for k in set(src_dict.keys()) - key_set:\n src_dict.pop(k)", "def complement(self, **kwargs):\n self._data.switch_complement(whether=True, **kwargs)\n return self", "def filter(self, new_set):\n for old_set in self.itervalues():\n for feat in old_set.iterkeys():\n if feat not in new_set:\n del old_set[feat]\n return self", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())", "def complement(self, universe):\n return Complement(universe, self)", "def __xor__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__xor__', other)", "def remove_elements_from_set(s: set, *args) -> set:\n for _ in args:\n s.remove(_)\n return s", "def __eliminate_unused_constraits (self, objects):\n result = []\n for c in self.constraints_:\n if c [0] in objects and c [1] in objects:\n result.append (c)\n\n return result", "def union(set1, set2):", "def complement_ixes(ixes, y):\n try:\n y = len(y)\n except Exception:\n pass\n\n all_ixes = np.ones(y, dtype=np.uint8)\n all_ixes[ixes] = 0\n\n return np.where(all_ixes == 1)[0]", "def difference_update(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> None:\n self.discard(RangeSet._to_rangeset(rng_set))", "def reverseComplementAll(self):\n\t\tseqDict = self.sequenceDict()\n\t\tfor i in range(len(seqDict.keys())):\n\t\t\tx = seqDict.keys()[i]\n\t\t\tprint self.reverseComplement(x)\n\t\treturn ''", "def difference(A, B, *C):\n return setutils(\"difference\", A, B, *C)", "def intersect_sets(S):\n res = S[0]\n for s in S:\n res &= s\n return res", "def 
complement(self) -> JustLatticeChord:\n nodes = []\n for node in self._nodes:\n nodes.append(list(map(lambda x, y: y - x, node, self._root)))\n return JustLatticeChord(self._fundamental, self._root, nodes)", "def complement(G):\n R = G.__class__()\n R.add_nodes_from(G)\n R.add_edges_from(((n, n2)\n for n, nbrs in G.adjacency()\n for n2 in G if n2 not in nbrs\n if n != n2))\n return R", "def common_cities_excluding(members, member_to_exclude, city_sets):\n\n cities = common_cities(members, city_sets)\n cities = [x for x in cities\n if x not in city_sets[member_to_exclude]]\n\n return cities", "def without_keys(keys):\n keys = frozenset(keys) # frozenset has efficient membership lookup\n return filter_keys_c(fnot(partial(operator.contains, keys)))", "def complement_base(base):\n return complements[base]", "def symmetricDifference(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set(lhs ^ rhs)", "def unused(permutation, nb_elements):\n return tuple(set(range(nb_elements)) - set(permutation))", "def removeFromSet(_session, _el, _set):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_f,\n _set,\n sc.SC_ARC,\n _el), True)\n while not it.is_over():\n _session.erase_el(it.value(1))\n it.next()", "def set_difference(set_a, set_b):\n \n diff = set_b - set_a\n \n return diff", "def __invert__(self):\n return NotAny(self)", "def complement(seq):\n complement_dict = {'A': 'T', 'C': 'G', 'T': 'A', 'G': 'C'}\n seq_list = list(seq)\n seq_list = [complement_dict[base] for base in seq_list]\n return ''.join(seq_list)", "def get_complement(s):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return ''.join(filter(None, [ dna_complement[c.upper()] if c.upper() in dna_complement else '' for c in s ] ))", "def difference(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> 'RangeSet':\n new_rng_set = self.copy()\n new_rng_set.difference_update(RangeSet(rng_set))\n return new_rng_set", "def excluded_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:\n all_combos: list[tuple[str, str]] = []\n for item_1 in list_1:\n for item_2 in list_2:\n all_combos.append((item_1, item_2))\n return [item for item in all_combos if item not in set(representative_combos(list_1, list_2))]", "def find_not_in_icd_set(lenoforginalset, numbers_set):\r\n originalset = [str(i) for i in range(1, lenoforginalset+1, 1)]\r\n\r\n numbers_not_in_set = list(set(originalset)-set(numbers_set))\r\n numbers_not_in_set = sorted([int(num) for num in numbers_not_in_set])\r\n numbers_not_in_set = [str(num) for num in numbers_not_in_set]\r\n\r\n return numbers_not_in_set", "def __invert__(self) -> Seq:\n return self.reverse_complement()", "def only_diff_elements(s1, s2):\n\n return set(s1 ^ s2)", "def complement_this(seq):\n compliment_dict = {'A':'T', 'T':'A', 'C':'G', 'G':'C'}\n rev_seq = ''\n for nuc in seq:\n if nuc in ['A', 'T', 'G', 'C']:\n rev_seq += compliment_dict[nuc]\n return rev_seq", "def get_list_difference(self, set_one, set_two):\n s1 = set(set_one)\n s2 = set(set_two)\n return list(s1.difference(s2))", "def difference(self, *lists):\n if self.is_a(set):\n return _(self._.difference(*lists))\n return _(_difference(self._, *lists))", "def set_diff(seq0, seq1):\n return list(set(seq0) - set(seq1))", "def negate_all(f):\n return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]", "def negate_all(f):\n return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]", "def 
sign_possibilities(dim):\n vecs = []\n for i in range(dim+1):\n vec = np.ones(dim)\n vec[:i] *= -1\n for svec in permutations(vec):\n vecs += [svec]\n return list(set(vecs))", "def union_sets(S):\n res = set()\n for s in S:\n res |= s\n return res", "def get_missing(dicts):\n for d in dicts:\n for k, v in d.items():\n d[k] = set([1, 2, 3, 4, 5, 6, 7, 8, 9]) - set(v)\n return dicts", "def remove_naked_sets_from_candidates(c, *args, naked_sets=defaultdict(list)):\n for d in args:\n for k, v in d.items():\n for coord in v:\n c[coord] = [n for n in c[coord] if n not in k]\n naked_sets[coord].extend(list(k))\n return c, dict(naked_sets)", "def diff_(S, p):\n return S.difference(set(p))", "def exclude_foreign_resources(returned_resource_set, expected_resource_set):\n expected_owners = {res.owner for res in expected_resource_set}\n return [\n res for res in returned_resource_set\n if res.owner in expected_owners\n ]", "def __xor__(self, other):\n if not isinstance(other, UniSet):\n other = self.fam.c_uniset(other)\n return self.fam.c_xor(self, other)", "def negate_all(f):\r\n return lambda *args, **kwargs: [-y for y in f(*args,**kwargs)]", "def complement(x):\n out = 1 - x\n return out", "def remove_pruned_subsets(subsets, min_deps):\n for n in subsets[:]:\n if min_deps.contains_superset(n.attrs):\n subsets.remove(n)", "def unmask_all(self):\n self.Active.mask = False\n self.Confirmed.mask = False\n self.Deaths.mask = False\n self.NewDeaths.mask = False\n self.NewCases.mask = False", "def power_set(sett):\n\n powerset_so_far = {frozenset()}\n\n for element in sett:\n set.update(powerset_so_far,\\\n extend_all(element, powerset_so_far))\n \n return powerset_so_far", "def unchanged(self):\n return set(o for o in self.intersect\n if self.past_dict[o] == self.current_dict[o])", "def make_removal_bool(*bools):\n return(np.invert([np.any(f) for f in zip(*bools)]))", "def complement(self):\n if not self.alpha.isComplementable():\n raise RuntimeError(\"Attempt to complement a Distrib \"\n \"based on a non-complementable alphabet.\")\n coms = self.alpha.getComplements()\n new_count = []\n for idx in range(len(coms)):\n cidx = coms[idx]\n if cidx == None:\n cidx = idx\n new_count.append(self.cnt[cidx])\n self.cnt = new_count\n return self", "def ind_complement(inds, n):\n return tuple(i for i in range(n) if i not in inds)", "def sat_solve(self):\n # YOUR CODE HERE\n o = frozenset()\n if self.isfalse:\n return False\n elif self.istrue:\n return set()\n l = self.generate_candidate_assignments()\n print(\"assignments,\", l)\n for i in l:\n st = sat_apply_assignment(self, i)\n print(\"i:\", i, \"new set\", st)\n\n if st.istrue:\n return {i}\n elif not st.isfalse:\n sat_solve(st)\n\n return {i}", "def _one_of_k_encoding_unk(self, x, allowable_set):\r\n if x not in allowable_set:\r\n x = allowable_set[-1]\r\n return list(map(lambda s: x == s, allowable_set))", "def merge_sets(sets):\n idxs_skipped = []\n n = len(sets)\n for i in range(n-1):\n if i not in idxs_skipped:\n set_i = sets[i]\n for j in range(i+1,n):\n set_j = sets[j]\n if set_i.intersection( set_j ) > set([]):\n sets[i].update( set_j )\n idxs_skipped.append( j )\n sets_u = [ sets[k] for k in np.setdiff1d(range(n), idxs_skipped).astype(np.int) ]\n return sets_u", "def __listminus(self, c1, c2):\n s2 = {}\n for delta in c2:\n s2[delta] = 1\n \n\tc = []\n\tfor delta in c1:\n\t if not s2.has_key(delta):\n\t\tc.append(delta)\n\n\treturn c", "def _compute_complement(self, complement):\n if self._.d == 2 and complement is not False:\n if complement is 
None:\n complement = self._complement()\n self._.complement = self.add_subscheme(complement, \"complement\")", "def all_different(variables) :\n constraints = []\n for i in xrange(len(variables)):\n var1 = variables[i]\n for j in xrange(i+1,len(variables)):\n var2 = variables[j]\n if var1!=var2:\n constraints.append(Constraint(var1,var2,constraint_different))\n return constraints", "def schur_complement(m11, m12, m21, m22, return_complement_11=False):\n m_m22 = m11 - m12.matmul(m22.inverse().matmul(m21))\n if return_complement_11:\n m_m11 = m22 - m21.matmul(m11.inverse().matmul(m12))\n return (m_m22, m_m11)\n else:\n return m_m22", "def complement(self):\n result = self.completion()\n for state in result.iter_states():\n state.is_final = not state.is_final\n\n return result", "def one_of_k_encoding_unk(x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))", "def one_of_k_encoding_unk(x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))", "def one_of_k_encoding_unk(x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))", "def add_all_different_constraint(self, variables):\n for (i, j) in self.get_all_possible_pairs(variables, variables):\n if i != j:\n self.add_constraint_one_way(i, j, lambda x, y: x != y)", "def __neg__(self):\n return self[::-1].complement", "def difference(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set(lhs - rhs)", "def one_of_k_encoding_unk(self, x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))", "def complement_of_cycle(n):\n return complement(nx.cycle_graph(n))", "def reverse_difference():", "def check_unconformant(self, not_found, local_set):\n not_missing = set()\n for title in local_set:\n if \"|\" in title:\n not_missing.add(re.search(\"\\|.*\\|\", title).group()[1:-1])\n not_missing.add(re.search(\"\\|.*$\", title).group()[1:].replace(\"|\", \"\"))\n not_missing.add(re.search(\"\\|.*$\", title).group()[1:].replace(\"|\", \"_\"))\n not_missing.add(re.search(\"\\|.*$\", title).group()[1:].replace(\"|\", \"\") + \"+\")\n\n not_missing = not_missing.intersection(not_found)\n\n return not_missing", "def filter_out_bans(mappings, bans):\n new_mappings = []\n for mapping in mappings:\n for ban in bans:\n if fnmatch.fnmatch(mapping.pattern, ban):\n break\n else:\n new_mappings.append(mapping)\n return new_mappings", "def one_of_k_encoding_unk(x, allowable_set):\n if x not in allowable_set:\n x = allowable_set[-1]\n return list(map(lambda s: x == s, allowable_set))", "def strict(cls):\n return frozenset()", "def remove_subset(set_, subset):\n ensure_set(set_)\n ensure_iterable(subset)\n\n for elem in subset:\n set_.remove(elem)", "def free_symbols(self) -> set[Basic]:\n empty: set[Basic] = set()\n return empty.union(*(a.free_symbols for a in self.args))", "def _mask(self, map_):\n return None" ]
[ "0.6654452", "0.6576406", "0.63831866", "0.62827057", "0.58516365", "0.5725783", "0.56848663", "0.5537928", "0.5518822", "0.54672354", "0.5461112", "0.5455276", "0.5429003", "0.5420453", "0.5417606", "0.54041857", "0.5387203", "0.5379959", "0.53690493", "0.5363554", "0.5359956", "0.53369766", "0.53304964", "0.53043926", "0.5292924", "0.5292573", "0.5292573", "0.5283176", "0.5259491", "0.5236802", "0.5230086", "0.51960504", "0.51926714", "0.5192342", "0.5188581", "0.5188535", "0.51726943", "0.5164565", "0.51474893", "0.5142057", "0.5141349", "0.51330286", "0.5112961", "0.51071936", "0.50995606", "0.5095885", "0.508392", "0.5081954", "0.50802714", "0.5077407", "0.507365", "0.5070103", "0.5064257", "0.50573015", "0.5052447", "0.5050962", "0.50398016", "0.50349563", "0.5034462", "0.5034462", "0.5031982", "0.50290036", "0.502623", "0.5022991", "0.5019423", "0.5019381", "0.5018318", "0.50127774", "0.50056076", "0.50040865", "0.5003375", "0.50027525", "0.49978608", "0.49864307", "0.49768123", "0.49766636", "0.49735573", "0.49606854", "0.49565956", "0.49507156", "0.49505723", "0.49502206", "0.4947411", "0.49460417", "0.49409446", "0.49403954", "0.49403954", "0.4940056", "0.49378875", "0.49312818", "0.4930999", "0.4926456", "0.49264202", "0.492248", "0.4922188", "0.4921569", "0.49199945", "0.48987308", "0.48883218", "0.4884373" ]
0.76911646
0
Add a given clause to the Map solver.
def add_clause(self, clause):
    self.solver.add_clause(clause)
    if self.dump is not None:
        self.dump.write(" ".join(map(str, clause)) + " 0\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_clause(self, clause):\n self.abstract_clauses.append(clause)", "def tell (self, clause):\n self.clauses.add(clause)", "def add_clause(self, clause, soft=False):\n\n # first, map external literals to internal literals\n # introduce new variables if necessary\n cl = list(map(lambda l: self._map_extlit(l), clause))\n\n if not soft:\n # the clause is hard, and so we simply add it to the SAT oracle\n self.oracle.add_clause(cl)\n else:\n self.soft.append(cl)\n\n # soft clauses should be augmented with a selector\n sel = cl[0]\n if len(cl) > 1 or cl[0] < 0:\n self.topv += 1\n sel = self.topv\n\n self.oracle.add_clause(cl + [-sel])\n\n self.sels.append(sel)", "def AddUseClause(self, use_clause):\n assert _IsValidUseClause(use_clause), use_clause\n self.use_clauses.append(use_clause)", "def add_clause(self, lits):\n\n self.nclauses += 1\n self.base_clauses.append(lits)", "def add_where_clause(self, clause):\r\n if not isinstance(clause, WhereClause):\r\n raise StatementException(\"only instances of WhereClause can be added to statements\")\r\n clause.set_context_id(self.context_counter)\r\n self.context_counter += clause.get_context_size()\r\n self.where_clauses.append(clause)", "def add_statement(rq_dict, statement, result_data_contents=\"graph\"):\n rq_dict[\"statements\"].append({\"statement\": statement})\n rq_dict[\"statements\"][-1][\"resultDataContents\"] = [result_data_contents]", "def add_assignment_clause(self, clause):\r\n if not isinstance(clause, AssignmentClause):\r\n raise StatementException(\"only instances of AssignmentClause can be added to statements\")\r\n clause.set_context_id(self.context_counter)\r\n self.context_counter += clause.get_context_size()\r\n self.assignments.append(clause)", "def parse_and_add_clause(self, line):\n clause = list()\n for literal in line.split():\n negated = 1 if literal.startswith('!') else 0\n variable = literal[negated:]\n if variable not in self.variable_table:\n self.variable_table[variable] = len(self.variables)\n self.variables.append(variable)\n encoded_literal = self.variable_table[variable] << 1 | negated\n clause.append(encoded_literal)\n self.clauses.append(tuple(clause))", "def add_change_clause(sv, nod, tree, vlu):\r\n clau=((Change, tree, None), vlu) \r\n if not clau in nod.clauses: nod.clauses+=[clau] # avoid duplicates\r", "def _add(\n self,\n instruction_name: str,\n qubits: Tuple[int, ...],\n entry: CalibrationEntry,\n ):\n self._map[instruction_name][qubits] = entry\n self._qubit_instructions[qubits].add(instruction_name)", "def add_criterion(self, criterion):\n self.criterion_entries.add(criterion)\n return self", "def handle_clause(clause,i,lit_to_clauses):\n for s in clause.split():\n l = int(s)\n if (l !=0):\n lit_to_clauses[dimacs2index(l)].append(i)\n link_literal_to_clause(l,i)", "def add_phrase(self, phrase: Phrase) -> None:\n self.phrase_string_map[phrase.phrase_string] = phrase\n self.phrase_type[phrase.phrase_string].add(\"phrase\")\n self.phrase_index[phrase.phrase_string] = phrase\n self.phrase_length_index[len(phrase.phrase_string)].add(phrase.phrase_string)\n self._index_phrase_words(phrase)\n self._index_phrase_tokens(phrase)", "def adder(where, what, value):\n if what in where:\n pass\n else:\n where[what] = value", "def AddCondition(self, name, expression):\n self.conditions[name] = expression", "def addConstraint(constraint, problem):\n problem += constraint", "def addConstraint(self, constraint: Constraint, /) -> None:\n ...", "def get_or_insert(cls, name):\n\t\tid = 
cls.normalize_name(name)\n\t\treturn super(Clause, cls).get_or_insert(id, name=name)", "def get_clause(self, clause_letter, clause_id = None):\n clause_type = {\"A\":\"subject\", \"B\":\"predicate\", \"C\":\"outcome\"}[clause_letter]\n if not clause_id:\n clause_id = get_random_id(self.soup, clause_type)\n clause_text = self.soup.find(clause_type, number=clause_id).description.text\n exec(\"self.\"+clause_letter+\"_clause = clause_text\")\n exec(\"self.\"+clause_letter+\"_clause_id = clause_id\")", "def log_clause(clause):\n t = str(clause)\n params = clause.compile().params\n\n def token(m):\n return repr(params[m.group(1)])\n\n logger.debug(re.compile(r':(\\w+)').sub(token, t))", "def add_constraint(self, constraint):\n self.constraints.append(constraint)", "def add_optimizer(self, optimizer):\n assert isinstance(optimizer, torch.optim.Optimizer)\n setattr(self, 'optimizer'+str(self._optimizer_counter), optimizer)\n self._optimizer_counter += 1\n # optimizer indexing : optimizer 0 is the optimizer for layer 0", "def addConstraintsPyOpt(self, optProb):\n if self.addToPyOpt:\n optProb.addCon(\n self.name + \"_thick\", lower=self.thickLower, upper=self.thickUpper, wrt=self.DVGeo.getVarNames()\n )\n\n optProb.addCon(\n self.name + \"_MAC\", lower=self.MACFracLower, upper=self.MACFracUpper, wrt=self.DVGeo.getVarNames()\n )", "def add(self, name, expression, level):\n assert isinstance(level, EnvironmentLevel)\n index = len(level.expressions)\n level.bindings[name] = index\n level.expressions.append(expression)", "def Add(self, ADD_Constraint):\r\n key = ADD_Constraint.conid\r\n if key in self.add_constraints:\r\n print(\"already has key...key=%s\\n%s\" % (key, str(ADD_Constraint)))\r\n else:\r\n self.add_constraints[key] = ADD_Constraint", "def _add_hints(self, **hints):\n self._hints.update(hints)", "def add_solver(library):\n def _add_solver(solver):\n if library:\n solvers.append(solver)\n return solver\n return _add_solver", "def add_word(self, word):\n word = self.map_word(word)\n super(InvariantLanguage, self).add_word(word)", "def add(self, pt, update_limits=True):\n self._add(pt)\n if update_limits:\n self.update_limits()", "def add_statement(self, statement):\n if not isinstance(statement, SqlStatement):\n raise ValueError('Input must be of the type SqlStatement')\n\n self._statements.append(statement)\n self._raw_statements.append(statement.sql())", "def add_constraint(self, constraint, problem):\n problem += constraint", "def addDecl(decl):\n # FIXME we need to handle the case where there is an existing unresolved\n # type in the map\n declList.append(decl)\n typeMap[decl.identifier] = decl", "def add_expression(binary_addr, s):\n\n assert not isinstance(s, labelmanager.Label) # TODO!?\n # TODO: Warn/assert if addr already in expressions? 
Allow overriding this via an optional bool argument?\n if binary_addr not in expressions:\n expressions[binary_addr] = s", "def include(self, map):\n self.map.update(map)", "def add(self, newSym):\n if newSym != \"?\":\n self.n = self.n + 1\n self.has[newSym] = 1 + self.has.get(newSym, 0)\n if self.has[newSym] > self.most:\n self.most = self.has[newSym]\n self.mode = newSym", "def add_keyword_map(self, kwmap):\n kw_cond = SQLBinaryExpr(SQLBinaryExpr(COL_NAME_KWMAP_KWID, OP_EQ, kwmap[COL_NAME_KWMAP_KWID]),\n OP_AND,\n SQLBinaryExpr(COL_NAME_KWMAP_MEASID, OP_EQ, kwmap[COL_NAME_KWMAP_MEASID]))\n entries = self.select_generic_data(table_list=[TABLE_NAME_KWMAP], where=kw_cond)\n if len(entries) <= 0:\n if self.sub_scheme_version < CAT_ACTIVE_VERSION:\n kwmapid = self._get_next_id(TABLE_NAME_KWMAP, COL_NAME_KWMAP_KWMAPID)\n kwmap[COL_NAME_KWMAP_KWMAPID] = kwmapid\n self.add_generic_data(kwmap, TABLE_NAME_KWMAP)\n else:\n self.add_generic_data(kwmap, TABLE_NAME_KWMAP)\n entries = self.select_generic_data(table_list=[TABLE_NAME_KWMAP], where=kw_cond)\n kwmapid = entries[0][COL_NAME_KWMAP_KWMAPID]\n return kwmapid\n else:\n tmp = \"Keyword '%s' \" % (kwmap[COL_NAME_KWMAP_KWID])\n tmp += \"is already assigned to file \"\n tmp += \"'%s'.\" % (kwmap[COL_NAME_KWMAP_MEASID])\n if self.error_tolerance < ERROR_TOLERANCE_LOW:\n raise AdasDBError(tmp)\n else:\n warn(tmp)\n if len(entries) == 1:\n return entries[0][COL_NAME_KWMAP_KWMAPID]\n elif len(entries) > 1:\n tmp = \"Keyword mapping of file '%s' \" % (kwmap[COL_NAME_KWMAP_MEASID])\n tmp += \"cannot be resolved because it is ambiguous. (%s)\" % entries\n raise AdasDBError(tmp)", "def add_optimizer(self, optimizer):\n assert isinstance(optimizer, torch.optim.Optimizer)\n setattr(self, 'optimizer', optimizer)", "def relax(self, clause):\n query = self.statement.split()\n i=0\n\n hasWhereOrHaving=False\n hasFrom=False\n\n while i<len(query):\n token = query[i]\n lower_token = token.lower()\n if lower_token == \"from\":\n hasFrom=True\n\n elif lower_token in [\"where\", \"having\"] and hasFrom:\n hasWhereOrHaving=True\n query[i] = \" \".join([token, clause, \"or\"])\n i+=1\n\n if hasFrom and not hasWhereOrHaving:\n query.append(\"where \"+clause)\n\n return self.set_child_and_return(' '.join(query))", "def add_query_to_article(article, query=''):\n article['query'] = query\n return article", "def addpredicate(self, pred):\n self._preds.append(pred)", "def append(self, m=None):\n if m is None:\n m = {}\n if self.mapper is not None:\n m = self.mapper(m)\n self._maps.insert(0, m)", "def add_aux_clause(self, lits):\n\n self.nclauses += 1\n self.aux_clauses.append(lits)", "def add_program(self, new_instruction):\n if self._programs is None:\n self._programs = new_instruction\n else:\n self._programs += self._separator + new_instruction", "def add_site(self, M):\n###############################################################################\n \n self.estimates[M.code, M.soln] = M", "def add(self, mu, **which):\n # Handle case where mu already seen\n k, _ = self.find(mu)\n if k is not None:\n self.which[k].update(which)\n return k\n # Handle case where mu not seen\n self.mu_db.append(mu.copy())\n self.which.append(which)\n return self.offset+len(self.mu_db)-1", "def add_poss(self, sqs=None, gates=None):\n if sqs:\n self.orig_sqs.update(set(sqs))\n if gates:\n self.orig_gates.update(set(gates))\n self.recalc()\n self.reset_hints()", "def addMapping(self, protocol, match, result,\n chain=None, mapping_type='lfn-to-pfn'):\n entry = {}\n 
entry.setdefault(\"protocol\", protocol)\n entry.setdefault(\"path-match-expr\", re.compile(match))\n entry.setdefault(\"path-match\", match)\n entry.setdefault(\"result\", result)\n entry.setdefault(\"chain\", chain)\n self[mapping_type].append(entry)", "def add_manual_conditions(self, modcell, condition_dict):\n self._add_conditions(modcell, condition_dict)", "def addMapping(mapping):\n defaultMapping_.addMapping(mapping)", "def register_optimizer(name, opt):\r\n if name in predefined_optimizers:\r\n raise ValueError('Optimizer name already taken: %s' % name)\r\n predefined_optimizers[name] = opt", "def addEntry(self, symbol, address):\n self.table[symbol] = address", "def add_constraint(self, constraint):\n self._ckey += 1\n self.constraints[self._ckey] = constraint", "def add_operation(self, op):\n\n self.operations[op.name] = op", "def add(self, key: str, constraints_fcn: Callable, **kwargs: Any):\n constraints, constraints_jacobian, constraints_double_derivative = constraints_fcn(**kwargs)\n super(HolonomicConstraintsList, self)._add(\n key=key,\n constraints=constraints,\n constraints_jacobian=constraints_jacobian,\n constraints_double_derivative=constraints_double_derivative,\n )", "def add_column_parameter(params, name, dataset, args, key):\n column_id = args.get_value(key, raise_error=False)\n if column_id is None:\n return\n column = dataset.column_by_id(column_id)\n params.append(name + '(' + column.name_in_rdb + ')')", "def addtoks(indict): # type: ({}) -> {}\n\n if 'Clause Text' not in indict:\n return indict\n\n cleantext = helpers.cleantext(indict['Clause Text'])\n indict['Tokens'] = helpers.allngrams(cleantext, 1, 3)\n\n return indict", "def add(self, data, connector):\n if not isinstance(data, (list, tuple)):\n return super(WhereNode, self).add(data, connector)\n\n obj, lookup_type, value = data\n alias, col, field = obj.alias, obj.col, obj.field\n\n if not hasattr(field, \"geom_type\"):\n # Not a geographic field, so call `WhereNode.add`.\n return super(GeoWhereNode, self).add(data, connector)\n else:\n if isinstance(value, SQLEvaluator):\n # Getting the geographic field to compare with from the expression.\n geo_fld = self._check_geo_field(value.opts, value.expression.name)\n if not geo_fld:\n raise ValueError('No geographic field found in expression.')\n\n # Get the SRID of the geometry field that the expression was meant \n # to operate on -- it's needed to determine whether transformation \n # SQL is necessary.\n srid = geo_fld.srid\n\n # Getting the quoted representation of the geometry column that\n # the expression is operating on.\n geo_col = '%s.%s' % tuple(map(qn, value.cols[value.expression]))\n\n # If it's in a different SRID, we'll need to wrap in \n # transformation SQL.\n if not srid is None and srid != field.srid and SpatialBackend.transform:\n placeholder = '%s(%%s, %s)' % (SpatialBackend.transform, field.srid)\n else:\n placeholder = '%s'\n\n # Setting these up as if we had called `field.get_db_prep_lookup()`.\n where = [placeholder % geo_col]\n params = ()\n else:\n # `GeometryField.get_db_prep_lookup` returns a where clause\n # substitution array in addition to the parameters.\n where, params = field.get_db_prep_lookup(lookup_type, value)\n\n # The annotation will be a `GeoAnnotation` object that\n # will contain the necessary geometry field metadata for\n # the `get_geo_where_clause` to construct the appropriate\n # spatial SQL when `make_atom` is called.\n annotation = GeoAnnotation(field, value, where)\n return super(WhereNode, self).add(((alias, 
col, field.db_type()), lookup_type, annotation, params), connector)", "def __init__(self, clause):\n if isinstance(clause, Clause):\n # We use frozenset here, so that clauses are not modifiable.\n # This ensures that two equal clauses always have the same hash,\n # due also to our definition of the __hash__ method.\n self.literals = frozenset(clause.literals)\n else:\n for i in clause:\n # Sanity check.\n assert isinstance(i, int), \"Not an integer: %r\" % i\n self.literals = frozenset(clause)", "def set_mapping_constraint(self, constraint):\n placementConstraint = lib_map.VertexConstraints()\n if 'x' in constraint:\n placementConstraint.x = constraint['x']\n if 'y' in constraint:\n placementConstraint.y = constraint['y']\n if 'p' in constraint:\n placementConstraint.p = constraint['p']\n self.vertex.constraints = placementConstraint", "def addWord(wmap, tok, lem):\n\n if (not tok in tt.setStopWords) and (not (tok.isupper() and tok.lower() in tt.setStopWords)): #Don't add stopwords - but be carful US vs us\n olem = lem\n lem = lem.lower() # makes many things simpler\n if tok in wmap: # tok is mapped already..., this is needed, sometimes the lemmatizing is inconsistent, eg. \"prototyping\" might go to \"prototyping\" or \"prototype\"\n if wmap[tok] != lem: #token exists in map, but is mapped differently\n clem = wmap[tok]\n if len(lem) < len(clem): ##new word is shorter (usually this means no plural form or so), eg. houses vs house\n if not clem in wmap or wmap[clem] == clem: #if not exists, add new mapping from old lemma of word to new lemma,eg. if mwords[Houses]=houses then we add mwords[houses]=house\n wmap[clem] = lem\n else:\n if not lem in wmap or wmap[lem] == lem: #existing lemma is shorter, we map to new lemma to the existing one\n wmap[lem] = wmap[tok]\n lem = wmap[tok]\n wmap[tok] = lem\n wmap[lem] = lem # a lemma maps to itself (maybe difference in capitalization)\n if olem != lem: wmap[olem] = lem # a lemma maps to itself\n if len(tok) > len(lem) and not tok.islower(): #if have Responsibilities -> responsibility, than add responsibilities -> responsibility, the \">=\" might be changed to \">\" without much loss\n addWord(wmap,tok.lower(),lem)", "def AddColumn(self, column):\n self.columns.append(column)\n self.column_dict[column.column_id] = column", "def addAlias(self, alias, node):", "def _assign_pattern(where, key, what):\n if what:\n where[key] = what", "def addconfiguration(self, name, q, unit='rad'):\n v = getvector(q, self.n)\n v = getunit(v, unit)\n self._configdict[name] = v\n setattr(self, name, v)", "def add_phase(self, phase):\n\n if not phase.name in self.phase_dict:\n self.phase_dict[phase.name] = phase\n else:\n if phase.energy < self.phase_dict[phase.name].energy:\n self.phase_dict[phase.name] = phase\n self._phases.append(phase)\n phase.index = len(self._phases)\n\n for elt in phase.comp:\n self.phases_by_elt[elt].add(phase)\n self.phases_by_dim[len(phase.comp)].add(phase)\n\n self.space |= set(phase.comp.keys())", "def add_op(self, op):\n self._operations.append(op)", "def build_match_phrase_clause(field, string):\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = string\r\n answer['match_phrase'] = tmp\r\n return answer", "def add(self, score, factor, name=\"\"):\n\t\tself._current_score += score * factor\n\t\tself._sum_factor += factor\n\n\t\tif name:\n\t\t\tself._map[name] = (score, factor)", "def register_optimizer(key, module):\n register(key, module, optimizer_dict)", "def add_new_mentor(self, mentor_dict:dict):\n\t\t_id = 
self.mentors.insert_one(mentor_dict)\n\n\t\tself.matcher.add_mentor(mentor_dict)\n\t\treturn str(_id.inserted_id)", "def __add__(self, other: MapValue) -> MapValue:\n return ops.MapMerge(self, other).to_expr()", "def add_rule_to_dict(rule_dict, lhs, rhs):\n if rhs not in rule_dict:\n rule_dict[rhs] = list()\n rule_dict[rhs].append(lhs) \n return rule_dict", "def add(self, symbol, value):\n if symbol in self.symbol_map:\n raise ValueError(f\"symbol {symbol} already exists in map.\")\n self.symbol_map[symbol] = value", "def AddOperation(self, op):\n self._operations.append(op)", "def add_dir_to_ifmodssl(self, aug_conf_path: str, directive: str, args: List[str]) -> None:\n # TODO: Add error checking code... does the path given even exist?\n # Does it throw exceptions?\n if_mod_path = self.get_ifmod(aug_conf_path, \"mod_ssl.c\")\n # IfModule can have only one valid argument, so append after\n self.aug.insert(if_mod_path + \"arg\", \"directive\", False)\n nvh_path = if_mod_path + \"directive[1]\"\n self.aug.set(nvh_path, directive)\n if len(args) == 1:\n self.aug.set(nvh_path + \"/arg\", args[0])\n else:\n for i, arg in enumerate(args):\n self.aug.set(\"%s/arg[%d]\" % (nvh_path, i + 1), arg)", "def add(self, mu, which):\n # Handle case where mu already seen\n k = self.find(mu)\n if k is not None:\n self.which[k].add(which)\n return k\n # Handle case where mu not seen\n self.mu_db.append(mu.copy())\n self.which.append(set([which]))\n return self.offset+len(self.mu_db)-1", "def add(self, word: str) -> None:\n self.d.add(word)", "def add(self, p, s, node) -> None:\n self.place.append(p)\n self.station.append(s)\n self.pi.append(node.pi[p, s] if p != float('inf') else float('inf'))\n self.noncoverage.append(node.noncoverage.left + node.noncoverage.right)\n self.cost.append(node.cost)\n self.delay.append(node.delay)\n self.step.append(node.key)", "def add_adjacent_vert(self, new_vert, vert_dict, action):\n self.adjacent_vert_dict[new_vert] = action\n vert_dict[new_vert.get_id()].pointed_vert_dict[self] = action", "def lvar_mapping_insert(*args):\n return _ida_hexrays.lvar_mapping_insert(*args)", "def addkeyword(self, line):\n self.__keywords.append(line)", "def add_mention(self, qid: str, mention: str, score: float):\n self._entity_symbols.add_mention(qid, mention, score)", "def add_query_pattern(gp, listener, args):\n gp = str(gp)\n if gp not in __graph_patterns.keys():\n __graph_patterns[gp] = set([])\n if listener is not None:\n __graph_patterns[gp].add((listener, args))", "def add_neighbor(self, node_name, cost):\n\t\tself.neighbors.append((node_name, cost))", "def add(self, flag_def):\n assert (flag_def.name not in self), \\\n ('Flag %r already defined' % flag_def.name)\n self._defs[flag_def.name] = flag_def", "def add_symptom(self, disease, results):\n self.symptoms[disease] = results", "def add_constraint(self, constraint):\n constraint_type = constraint[0]\n if constraint_type == 'time':\n dependent_variable = constraint[-2]\n dependee_variable = constraint[-1]\n dependent_index = self.subvariable_name.index(dependent_variable)\n dependee_index = self.subvariable_name.index(dependee_variable)\n constraint[-2] = self.value[dependent_index]\n constraint[-1] = self.value[dependee_index]\n if constraint_type in ['threshold', 'count']:\n threshold_variable = constraint[-1]\n threshold_index = self.subvariable_name.index(threshold_variable)\n constraint[-1] = self.value[threshold_index]\n if constraint_type == 'only_one':\n onlyone_variable = constraint[-1]\n onlyone_index = 
self.subvariable_name.index(onlyone_variable)\n constraint[-1] = self.value[onlyone_index]\n if constraint_type in self.constraint.keys():\n self.constraint[constraint_type] += [constraint[1:]]\n else:\n self.constraint[constraint_type] = [constraint[1:]]", "def add_vertex(self, vertex_id, dirs):\n self.vertices[vertex_id] = {i: \"?\" for i in dirs}", "def add_q_arg(self, q_arg):\n self._q_args.append(q_arg)\n self.add_decompostion(q_arg)", "def add_statement(self, statement):\n if self.check_statement(statement):\n self._statement = statement\n self.statement_status = 'OK'\n else:\n self._statement = None\n self.statement_status = 'X'", "def add_route(g, origin, destination, distance, choice_dir):\n origin_code = g.convert[origin]\n destination_code = g.convert[destination]\n distance = int(distance)\n # Add route both ways\n if(choice_dir == \"y\"):\n g.city_dict[origin_code].add_flights_in((destination_code, distance))\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n \n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n g.city_dict[destination_code].add_flights_out((origin_code, distance))\n # Add route one way \n if(choice_dir == \"n\"):\n g.city_dict[origin_code].add_flights_out((destination_code, distance))\n g.city_dict[destination_code].add_flights_in((origin_code, distance))\n \n \n \n return g", "def add_op(self, expr):\n from cascada.bitvector import operation\n assert isinstance(expr, operation.Operation)\n assert not self.contain_op(expr)\n name = \"{}{}\".format(self.id_prefix, self.counter)\n self.counter += 1\n identifier = core.Variable(name, expr.width)\n self.table[identifier] = expr\n\n return identifier", "def insert(self, rule, ident):\n self[ident] = rule", "def add(self, v):\n if v != \"?\":\n self.n += 1\n self.lo = min(v, self.lo)\n self.hi = max(v, self.hi)\n\n if len(self.has) < the[\"nums\"]:\n self.has.append(v)\n self.is_sorted = False\n\n elif random.random() < the[\"nums\"] / self.n:\n pos = random.randint(0, len(self.has) - 1)\n self.has[pos] = v\n self.is_sorted = False", "def addName(self, dict, name, value):\n dict[name] = value", "def _add_module_to_map(self, model_key, module_dict):\n\n # Get the module name from the dict.\n if 'module' in module_dict:\n module_name = module_dict['module']\n elif 'omftype' in module_dict:\n module_name = module_dict['argument']\n else:\n # Bad dict.\n raise ValueError('Malformed module_dict: {}'.format(module_dict))\n\n # Ensure we aren't over-writing existing module.\n if module_name in self.model_map['module']:\n s = 'Module {} is already present!'.format(module_name)\n raise ItemExistsError(s)\n\n # Map it by name.\n self.model_map['module'][module_name] = [model_key, module_dict]", "def add_to_col(self, col: int, data_dict: dict):\n for key, val in data_dict.items():\n self.data.at[key, col] = val", "def map(self) -> global___Statement.Declaration:", "def _addalias(optid, optalias):\n\n optalias = _sanitizeName(optalias) # sanitize name so it conforms.\n with sqlite3.connect(DB) as db:\n cursor = db.cursor()\n try:\n cursor.execute('PRAGMA foreign_keys=ON')\n cursor.execute(\"INSERT INTO aliases VALUES (?, ?)\", (optid, optalias,))\n db.commit()\n return True\n except sqlite3.Error, e: # more descriptive error messages? (column name is not unique, foreign key constraint failed)\n print (\"ERROR: I cannot insert alias {0} to {1}: {0}\".format(optalias, optid, e))\n return None" ]
[ "0.7232271", "0.68253154", "0.64435804", "0.64024657", "0.6194685", "0.589618", "0.55395997", "0.549732", "0.5440252", "0.5343942", "0.5326947", "0.5320732", "0.52594405", "0.5190137", "0.5155156", "0.50672406", "0.49388227", "0.48930344", "0.48519868", "0.48471695", "0.48246765", "0.47988698", "0.4788916", "0.4788251", "0.47707832", "0.47665048", "0.47170556", "0.47126248", "0.46920457", "0.4686941", "0.46813583", "0.4677196", "0.46744165", "0.4659373", "0.46103704", "0.46051097", "0.46048877", "0.4593929", "0.45895", "0.45782408", "0.4567495", "0.45643362", "0.45633885", "0.45526448", "0.45287246", "0.4522926", "0.451104", "0.450846", "0.4502339", "0.4500232", "0.4491722", "0.44837862", "0.4464873", "0.4460101", "0.44520342", "0.4451005", "0.44443923", "0.44422626", "0.44397712", "0.4417852", "0.44087207", "0.4407571", "0.44057503", "0.4384741", "0.43832156", "0.43794152", "0.43646094", "0.43612278", "0.4360062", "0.43570656", "0.43556333", "0.4354242", "0.43460253", "0.4344181", "0.4341803", "0.43417263", "0.43412563", "0.43379387", "0.4332818", "0.4331827", "0.43296412", "0.43201718", "0.43194112", "0.43180948", "0.43071577", "0.43051127", "0.4304813", "0.43047988", "0.42996368", "0.4293603", "0.4291447", "0.42856154", "0.42745206", "0.42698336", "0.426312", "0.42619464", "0.42590064", "0.42583004", "0.42566013", "0.42554462" ]
0.75571716
0
Block down from a given set.
def block_down(self, frompoint):
    comp = self.complement(frompoint)
    clause = [(i + 1) for i in comp]
    self.add_clause(clause)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lift_down(self):\n\n # Can't reuse set_lift_pos due to bug above\n bottom_limit = self.get_lift_limit()\n self.send(self.cmd.SET_LIFT_SET, bottom_limit)", "def down(self, i):\n pass", "def bring_down(self):\n\n self.move(self.__min_step__)", "def difference_update(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> None:\n self.discard(RangeSet._to_rangeset(rng_set))", "def down(self):\n if self.bottom == self.current:\n return\n else:\n self.current -= 1", "def unblock(self, node):\n\n self.blocked[node] = False\n Bnode = self.b_map[node]\n while Bnode:\n next_node = Bnode.pop(0)\n if self.blocked[next_node]:\n self.unblock(next_node)", "def _move_down(self):\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)", "def downCmd(self, *args):\n idx = self.tDisp.selIdx\n if idx != len(self.conflict.infeasibles) - 1:\n self.conflict.infeasibles.moveCondition(idx, idx + 1)\n self.conflict.recalculateFeasibleStates()\n self.event_generate('<<ValueChange>>')\n self.tDisp.selection_set(self.tDisp.selId)\n self.selChgCmd()", "def ramp_down(self) -> None:\n for stock in self.stocks:\n if stock.are_any_shares_owned():\n self.cash_balance = stock.sell(-1, self.cash_balance, self.buy_budget)", "def reverse_moves(self, moves_set):\n moves_set.reverse()\n for choice in tuple(moves_set):\n self.game.move([choice[0], -choice[1]])", "def unblock(self, source):\n raise NotImplementedError", "def rev(self):\n self.set.reverse()", "def complement(self, aset):\n return self.all_n.difference(aset)", "def down(self):\n self.move(0,-1)", "def unlock_nodes(set_name):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.unlock_nodes\")\n\n if mc.objExists(set_name):\n for o in mc.sets(set_name, query=True):\n if mc.lockNode(o, query=True):\n flg.info(\"Unlocking {}\".format(o))\n mc.lockNode(o, lock=False)\n else:\n flg.warning(\"Set, {}, does not exist\".format(set_name))", "def removeFromSet(_session, _el, _set):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_f,\n _set,\n sc.SC_ARC,\n _el), True)\n while not it.is_over():\n _session.erase_el(it.value(1))\n it.next()", "def unblock_all(t):\n blocked_count = 0\n\n while True:\n blocked_user_ids = t.blocks.ids()[\"ids\"]\n if not blocked_user_ids:\n print(\"No more IDs to unblock\")\n break\n\n for user_id in blocked_user_ids:\n blocked_count = blocked_count + 1\n print(f\"{blocked_count}: {user_id}\")\n try:\n t.blocks.destroy(user_id=user_id, include_entities=False, skip_status=True)\n except:\n print(\"error\")", "def drop_between(self, start, end):\n # catch all invalid args and throw an Index error if true\n if start < 0 or end > self.size or start > end:\n raise IndexError()\n # initialize node counter to 0\n counter = 0\n # current node for looping is front node\n current = self.front\n # while current node isn't None\n while current is not None:\n # if it's position is within start and end args\n if start <= counter < end:\n # skip the current node in the deque, effectively\n # deleting it\n current.prior.next = current.next\n # if current node's next is empty\n if current.next is None:\n # you have one node left, and you have to get\n # rid of it, so clear the deque and break\n self.size = 0\n self.front = None\n self.back = None\n break\n # set next node's prior to current's prior, effectively\n # skipping the current node in deque\n current.next.prior = current.prior\n # decrement size of deque\n self.size -= 1\n # add 
one to the counter\n counter += 1\n # move on to the next node who flows. He nose dove and sold\n # nada :)\n current = current.prior", "def downset(self,\n _sortkey=operator.attrgetter('dindex'),\n _next_concepts=operator.attrgetter('lower_neighbors')):\n return algorithms.iterunion([self], _sortkey, _next_concepts)", "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def backward_shimmey(self):\n for x in range(6):\n self.right(primary=-70, counter=-30)\n time.sleep(.5)\n self.left(primary=-70, counter=-30)\n time.sleep(.5)\n self.stop()", "def all_breakdown(self,toplevel=False,use_cache=True,getone=False, debug=False):\n todo_gates=self.gates.copy()\n breakdown_solutions=set()\n bat=0\n while todo_gates:\n if time.time()>self.timeout:\n return 'timeout'\n thisgate=todo_gates.pop()\n startpos,ns=thisgate\n self.set(startpos)\n self.gatesqs.remove(startpos)\n self.gates.remove(thisgate)\n dvs=[n[0] for n in [(0,(1,0)),(1,(0,1)),(2,(-1,0)),(3,(0,-1))] if not self.rows[startpos[1]+n[1][1]][startpos[0]+n[1][0]]]\n\n if ns==1:\n #(if it's a req gate)\n target_blocked=False\n indv=None\n for dv in dvs:\n if add(startpos,dv) in self.sqs:\n indv=dv\n break\n if indv==None:\n pass\n else:\n newdvs=[dv for dv in dvs if dv in [indv, (indv+1)%4, (indv-1)%4]]\n if newdvs!=dvs:\n dvs=newdvs\n #removes wrong way dvs totally!\n #adde the exception - it can occur when a room is reduced to just RR for example. not sure what to do then.\n for dv in dvs:\n dest=add(startpos,dv)\n if dest in self.sqs:\n if self.rows[dest[1]][dest[0]]==1:\n target_blocked=True\n break\n elif ns==2:\n dvs.append('skip')\n bat=0\n for indv in dvs:\n if indv=='skip':\n su=self.make_subrooms_from_current_state()[0]\n su.parent=self.hashid\n res=su.get_all_sols(use_cache=use_cache,getone=getone)\n if res=='timeout':\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return res\n if not res:\n continue\n if getone and res:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return True\n this=(startpos,'skip',(),(),startpos)\n offset_amount=(self.xoffset-su.xoffset,self.yoffset-su.yoffset)\n for sr in res:\n fixed_sr=()\n for srgate in sr:\n start,sindv,path,covered,end=srgate\n newsr=(offset(start,offset_amount),sindv,tuple(path),tuple(offset(covered,offset_amount)),offset(end,offset_amount),)\n fixed_sr=fixed_sr+(newsr,)\n fixed_sr=(this,)+fixed_sr\n breakdown_solutions.add(fixed_sr)\n continue\n continuations=self.go_until_deadend_with_path(startpos,indv,[])\n if continuations=='timeout':\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return 'timeout'\n for path,covered,stop in continuations:\n for g in self.gates:\n if g[0]==stop:\n endgate=g\n break\n self.gatesqs.remove(stop)\n self.gates.remove(endgate)\n #I have made my move. if room is done, return true.\n #if not done, try to solve my subrooms.\n\n self.set_many(covered)\n subrooms=self.make_subrooms_from_current_state()\n #it sucks that this makes them all... it should just make them one at a time. 
save a lot of work on big rooms.\n #90% of the time the smallest subroom is illegal\n #restore things to normal now; i made it out, and now the solution is up to the subrooms.\n self.gates.add(endgate)\n self.gatesqs.add(stop)\n self.unset_many(covered)\n initial_gate_exit=(startpos,indv,tuple(path),tuple(covered),stop,)\n partial_solutions=((initial_gate_exit,),)\n subrooms.sort(key=lambda x:len(x.gatesqs))\n subrooms_ok=True\n #actually the permut stuff is slightly off. we should find all permutations of sols within a room, not just perms of subrooms .\n if time.time()>self.timeout:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return 'timeout'\n subrooms_ok=True\n subroomreses={}\n for su in subrooms:\n if time.time()>self.timeout:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return 'timeout'\n subroomres=su.get_all_sols(toplevel=False,use_cache=use_cache,getone=getone)\n #SHOULD use cache here - just temp\n if subroomres=='timeout':\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n #resetting this stuff IS necessary!\n return 'timeout'\n\n #this is a bit weird. if one subroom times out it doesn't mean the others won'1! it's still possible\n #to get negative solutions from this point on - but no positive.\n #this res should be put in terms of the outer room, not this guy's room.\n #su's got absolute x,yoffsets, same as parent. but sols are coming in su's coord system. to convert to parent\n if getone and subroomres:\n continue\n if not subroomres:\n #that is, only if room definitely doesnt have a sol.\n subrooms_ok=False\n break\n subroomreses[su.hashid]=self.fix_subroomres(su, subroomres)\n if not subrooms_ok:\n #if this set of subrooms doesn't work just die. none of the others will work either.\n continue\n if getone:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return True\n #for each subroom order, choose one solution. then make one sol out of all the perms of that.\n if subrooms:\n aa=self.all_orderings_of_subroomreses(subroomreses)\n #and here we make all permutations of all the subrooms.\n including_this_subroom_partials=set()\n for partial in partial_solutions:\n for ii,a in enumerate(aa):\n if ii%128==0 and time.time()>self.timeout:\n self.gates.add(thisgate);self.gatesqs.add(startpos);self.unset(startpos);return 'timeout'\n if a=='timeout':\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return 'timeout'\n combined=tuple(partial[:])+tuple(a[:])\n including_this_subroom_partials.add(combined)\n partial_solutions=including_this_subroom_partials\n #now you are done with this first stop/covered, are at another exit, and no subroom has failed. 
so, good on ya!\n if getone:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return True\n breakdown_solutions.update(partial_solutions)\n if breakdown_solutions:\n g_hassols_cache[self.key]=True\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n if getone:\n if not breakdown_solutions:\n return False\n return list(breakdown_solutions)", "def bubble_down(self, i):\n\n smallest = self.find_smallest(i)\n\n org = i\n\n while smallest != org:\n\n self.switch(org, smallest)\n\n org = smallest\n\n smallest = self.find_smallest(smallest)", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def eliminate_left_recursion(self,stop_at_set):\n assert self.is_canonical\n\n # Determine a definite ordering of the rules.\n # Use a DFS so we only have essential backedges.\n preorder_names = self.preorder()\n preorder_index = dict()\n for name in preorder_names:\n preorder_index[name] = len(preorder_index)\n\n # Break self-cycles via more than one step\n for i in range(1,len(preorder_names)):\n rule_name = preorder_names[i]\n if rule_name in stop_at_set:\n continue\n rule = self.rules[rule_name]\n replacement = []\n changed = False\n for rhs in rule.as_container():\n phrase = rhs.as_container()\n first = phrase[0]\n rest = phrase[1:]\n if first.is_symbol_name():\n first_name = first.content\n j = preorder_index[first_name]\n if (j < i) and (first_name not in stop_at_set):\n # Break this backedge\n Aj = self.rules[first_name].as_container()\n if len(rest) == 0:\n # Add Aj's alternatives to Ai's alternatives.\n # Aj is a choice node\n # The elements of Aj are already of suitable class.\n replacement.extend([delta for delta in Aj])\n else:\n # Rest is non-empty\n for delta in Aj:\n replacement.append(self.MakeSeq(list_without_empty(delta.as_container()) + rest))\n changed = True\n else:\n # Pass it through. It's not a backedge, or we've been\n # asked to stop here.\n replacement.append(rhs)\n else:\n # First thing is not a symbol name. 
Pass it through\n replacement.append(rhs)\n if changed:\n # Update with the new rule.\n self.rules[rule_name] = self.MakeChoice(replacement)\n\n # Finally, eliminate immediate left recursion.\n self.eliminate_immediate_recursion(self)", "def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\r\n pass", "def release_set(self):\n self._release_locks()\n if self._locks: # pragma: nocover\n # This shouldn't happen, it means we couldn't release our\n # locks, abort\n self._fail_out()\n return\n else:\n with self._state_change:\n if self.failed:\n return\n self.state = PartitionState.ALLOCATING\n self._child_watching(self._allocate_transition, async=True)", "def discard(self, discard_set, check=True):\n if check:\n value = 0\n for domino in discard_set:\n value += domino.get_value\n\n if value != self.number_point:\n raise BadSumException(self.number_point)\n\n for domino in discard_set:\n self.hand.remove(domino)", "def dec(self, by=1):\n assert by > 0\n self.counter -= by\n if self.counter <= 0:\n # Don't leave self.counter < 0, that will screw things up in\n # future calls.\n self.counter = 0\n # Transitioning from nonzero to 0 means wait() need no longer wait.\n self.event.send()", "def restrict(self):\n calls = []\n while self[1:] not in calls:\n calls.append(self[1:])\n self.restrict_once()", "def down(self):\n self.set_initial_offset(self.initial_offset + self.item_heights)", "def move_block_down(self):\n if not self.verify_legal_move(\"DOWN\"):\n # If it can't move down, place the block on the grid\n for b_x, b_y in self.get_block_positions(self.active_piece.FIGURE):\n self.board[b_y][b_x] = self.active_piece.COLOR\n\n self.check_clear_lines()\n\n self.spawn_new_piece()\n return\n\n self.active_piece.move_down()", "def sift_down(self, start, end):\n i, j = start, 2*start+1\n # Temporary variable to decrease exchange times\n temp = self.heap_list[start]\n # end is equal to len(self.heap_list)-1\n while j <= end:\n # compare left child node with right child node\n if j<end and self.heap_list[j]<self.heap_list[j+1]:\n j += 1\n if temp >= self.heap_list[j]:\n break\n else:\n #self.heap_list[i], self.heap_list[j] = self.heap_list[j], self.heap_list[i]\n self.heap_list[i] = self.heap_list[j]\n i = j\n j = 2*j+1\n self.heap_list[i] = temp", "def move_down(self):\n\t\treturn self._move(up=False)", "def lift_up(self):\n\n # Can't reuse set_lift_pos due to bug above\n self.send(self.cmd.SET_LIFT_SET, self.cmd.SET_LIFT_SET[\"check\"][\"min\"])", "def down(self, num):\r\n if not len(self.items):\r\n return\r\n self.item_sel += num\r\n if self.item_sel < 0:\r\n self.item_sel = 0\r\n if self.item_sel > len(self.items) - 1:\r\n 
self.item_sel = len(self.items) - 1\r\n\r\n last_line = self.height - 1 - self.reserved_lines\r\n if self.item_sel < self.item_top:\r\n self.item_top = self.item_sel\r\n if self.item_sel - self.item_top > last_line:\r\n self.item_top = self.item_sel - last_line\r\n\r\n self.do_paint()", "def bottom_up(self, safe=False):\n if safe:\n assert not self.cycle()\n discard = set()\n queue = deque(self.leaves())\n while queue:\n new = queue.popleft()\n if new.children() - discard:\n queue.append(new)\n else:\n discard.add(new)\n for parent in sorted(new.parents(), key=lambda x:x.nodeid):\n if not parent in discard and not parent in queue:\n queue.append(parent)\n yield new", "def _stop_attack(self):\n self._add_malicious_blocks_to_honest_dag()\n self._competing_chain_tip_gid = None\n self._first_parallel_block_gid = None", "def discard(self, rng: Rangelike) -> None:\n # be lazy and do O(n^2) erasure\n if isinstance(rng, RangeSet):\n temp = self.copy()\n for r in rng:\n temp.discard(r)\n self._ranges = temp._ranges\n return\n # elif _is_iterable_non_string(rng):\n # raise ValueError(\"argument is iterable and not range-like. Use .difference_update() instead\")\n # make sure rng is a Range\n rng = Range(rng)\n # remove rng from our ranges until we no longer need to\n current_node = self._ranges.first\n while current_node:\n new_range = current_node.value.difference(rng)\n if not new_range or new_range.isempty():\n # first node is entirely consumed by the range to remove. So remove it.\n self._ranges.pop_node(current_node)\n elif isinstance(new_range, RangeSet):\n # replace current value with lower, and add higher just afterwards.\n # It can't possibly overlap with the next range, because they are disjoint.\n current_node.value = new_range._ranges.first.value\n self._ranges.insert_after(current_node, new_range._ranges.last.value)\n # in this case, we also know that we just hit the top of the discarding range.\n # therefore, we can short-circuit.\n break\n else:\n # replace just this element, which was cut off\n if new_range > current_node.value:\n # we're only computing the difference of one contiguous range.\n # if all we've done is cut off the bottom part of this range, then\n # we must have reached the top of the discarding range.\n # therefore, we can short-circuit.\n current_node.value = new_range\n break\n else:\n # otherwise, we just change this element (maybe replace it with itself) and keep going.\n current_node.value = new_range\n current_node = current_node.next", "def symmetric_difference_update(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> None:\n # the easiest way to do this is just to do regular symmetric_difference and then copy the result\n rng_set = RangeSet._to_rangeset(rng_set)\n self._ranges = self.symmetric_difference(rng_set)._ranges", "def throttle_down( self ) ->(int,int):\n return None, None", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def backward(self, top, propagate_down, bottom):\n pass", "def move_down(self, lifting, **kwargs):\n self.log.debug(\"Moving table down by {!s} microns\".format(lifting))\n if self.variables[\"Table_state\"]:\n success = self.move_to([0, 0, -lifting], False, 0, True, **kwargs)\n if success:\n self.variables[\"Table_state\"] = False\n return success\n else:\n self.queue.put({\"Info\": \"Table already in the down position...\"})\n return True", "def percolate_down(self, index):\n if self.min is True:\n small_child = self.min_child(index)\n if small_child is 
not None:\n if self._data[small_child] < self._data[index]:\n self._swap(index, small_child)\n self.percolate_down(small_child)\n if self.min is False:\n large_child = self.max_child(index)\n if large_child is not None:\n if self._data[large_child] > self._data[index]:\n self._swap(index, large_child)\n self.percolate_down(large_child)", "def _swim(self):\n child_ix = len(self) - 1\n parent_ix = self._get_parent(child_ix)\n while (parent_ix is not None and self._test(parent_ix, child_ix)):\n self._exch(parent_ix, child_ix)\n child_ix = parent_ix\n parent_ix = self._get_parent(parent_ix)", "def move_down(self):\n self.move_step(1)", "def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()", "def drop(self):\n for step in self.steps:\n step[1].drop()", "def demote(self):\n self.is_promoted = False\n self.blockable_moves_sets = type(self).blockable_moves_sets\n self.unblockable_moves = type(self).unblockable_moves", "def move_down(self, request):\n return self._move(False, request)", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def example_deletion_with_block_lowering(self):\n i = 0\n while i < len(self.shrink_target.blocks):\n if not self.is_shrinking_block(i):\n i += 1\n continue\n\n u, v = self.blocks[i].bounds\n\n j = 0\n while j < len(self.shrink_target.examples):\n n = int_from_bytes(self.shrink_target.buffer[u:v])\n if n == 0:\n break\n ex = self.shrink_target.examples[j]\n if ex.start < v or ex.length == 0:\n j += 1\n continue\n\n buf = bytearray(self.shrink_target.buffer)\n buf[u:v] = int_to_bytes(n - 1, v - u)\n del buf[ex.start : ex.end]\n if not self.incorporate_new_buffer(buf):\n j += 1\n\n i += 1", "def _downward(self, root=0):\n\n if self._verbosity > 0:\n print(\"sending messages towards the leaf nodes\", end=\"\", flush=True)\n ready_to_send = set([root])\n while len(ready_to_send) > 0:\n current = ready_to_send.pop()\n self.clique_beliefs[current] = self._calc_message(current, self.children[current], False)\n ready_to_send.update(self.children[current])\n if self._verbosity > 0:\n print(\".\", end=\"\", flush=True)\n if self._verbosity > 0:\n print(\"\", end=\"\\n\", flush=True)", "def _unshard(\n self,\n handles: List[FlatParamHandle],\n ) -> None:\n if not handles:\n return\n if self.limit_all_gathers:\n event = self._free_event_queue.dequeue_if_needed()\n if event:\n event.synchronize()\n any_ran_pre_unshard = False\n with torch.cuda.stream(self._streams[\"pre_all_gather\"]):\n for handle in handles:\n ran_pre_unshard = handle.pre_unshard()\n any_ran_pre_unshard = any_ran_pre_unshard or ran_pre_unshard\n if any_ran_pre_unshard:\n self._streams[\"all_gather\"].wait_stream(self._streams[\"pre_all_gather\"])\n with torch.cuda.stream(self._streams[\"all_gather\"]):\n for handle in handles:\n handle.unshard()\n handle.post_unshard()", "def rollback(self, mapset=None):\n if mapset is None:\n mapset = self.current_mapset", "def move_down(self):\n client.moveByVelocityAsync(0, 0, -1, 0.3).join()\n # if self.logging:\n # self.log_arr.append(\"down\")", "def _drop(self, all=False):\n if all:\n self._queue.clear()\n else:\n self._queue.pop(0)\n self._redraw_queue()", "def _straight(self):\r\n \r\n flow = min(self.upstream.demand, self.downstream.supply)\r\n self.upstream.outflow = flow\r\n self.downstream.inflow = flow", "def _moveChainNode(self, chain, sets, buckets):\n #self._printBucketState() \n e = chain.pop()\n if e in self._locked:\n print \"error! 
can't move locked node.\"\n \n self.sets[sets[0]].remove(e)\n self.sets[sets[1]].add(e)\n #remove myself from my bucket\n self._buckets[e.biPartBuckets[0]][e.biPartChain[1]]\n #e.biPartBuckets = [e.biPartBuckets[1],e.biPartBuckets[0]]\n e.biPartBuckets.reverse()\n e.biPartSets.reverse()\n\n #e.biPartSets = [e.biPartSets[1],e.biPartSets[0]]\n\n #find new gain: (We can actually skip this step)\n #self._addToBucket(buckets[1], e, sets[1], sets[0])\n # add moved node to locked set\n self._locked.add(e)\n for n in (e.parents + e.children):\n self._updateBucketNode(n)\n\n #self._printBucketState() \n return", "def remove(self, pset):\n self._sets.remove(pset)", "def block_candset(self, candset, verbose=False, show_progress=True,\n n_chunks=1):\n logger.warning(\n \"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.\")\n\n # validate data types of input parameters\n self.validate_types_params_candset(candset, verbose, show_progress,\n n_chunks)\n\n # get and validate metadata\n log_info(logger, 'Required metadata: cand.set key, fk ltable, ' +\n 'fk rtable, ltable, rtable, ltable key, rtable key', verbose)\n\n # # get metadata\n key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(\n candset, logger, verbose)\n\n # # validate metadata\n cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable,\n ltable, rtable, l_key, r_key,\n logger, verbose)\n\n # validate rules\n assert len(self.rules.keys()) > 0, 'There are no rules to apply'\n\n # validate n_chunks parameter\n validate_object_type(n_chunks, int, 'Parameter n_chunks')\n validate_chunks(n_chunks)\n\n n_chunks = get_num_partitions(n_chunks, len(candset))\n # do blocking\n\n # # initialize the progress bar\n # if show_progress:\n # bar = pyprind.ProgBar(len(candset))\n\n # # set index for convenience\n l_df = ltable.set_index(l_key, drop=False)\n r_df = rtable.set_index(r_key, drop=False)\n\n # # get attributes to project\n l_proj_attrs, r_proj_attrs = self.get_attrs_to_project(l_key, r_key,\n [], [])\n l_df, r_df = l_df[l_proj_attrs], r_df[r_proj_attrs]\n\n c_df = self.block_candset_excluding_rule(candset, l_df, r_df, l_key,\n r_key,\n fk_ltable, fk_rtable, None,\n show_progress, n_chunks)\n\n # update catalog\n cm.set_candset_properties(c_df, key, fk_ltable, fk_rtable, ltable,\n rtable)\n\n # return candidate set\n return c_df", "def drop_below(adict,limit):\n # Hint: Create a list of netids to drop, and THEN drop them\n for key in list(adict):\n if adict[key] < limit:\n del adict[key]", "def move_lift_down():\n return _move_lift(0.2)", "def grab(amounts: Dict[str, int]) -> None:\n for name, amount in amounts.items():\n assert 0 <= amount <= Resources.available[name]\n Resources.available[name] -= amount", "def drop_lowest_set_bit(x):\n\n return x & (x - 1)", "def requestUnblockVis(self):\n if self.__nextSetZoneDoneEvent is None:\n self.__nextSetZoneDoneEvent = self.level.cr.getNextSetZoneDoneEvent()\n self.acceptOnce(self.__nextSetZoneDoneEvent, self.okToUnblockVis)\n # make sure that a setZone is sent this frame, even if the\n # visibility list doesn't change\n self.level.forceSetZoneThisFrame()", "def release(self):\n if self.points > 0 and self.waiting:\n self.points = self.points - 1\n d = self.waiting.pop(0)\n d.callback(self)", "def event_ball_search_unblock(self, **kwargs):\n del kwargs\n self.ball_search.unblock()", "def dec(self, key: str) -> None:\n if key not in self.mapping:\n return\n cur_block = self.mapping[key]\n del self.mapping[key]\n 
cur_block.keys.remove(key)\n\n if cur_block.val != 1:\n if cur_block.val - 1 != cur_block.prev.val:\n new_block = Block(cur_block.val - 1)\n cur_block.prev.insert_after(new_block)\n else:\n new_block = cur_block.prev\n new_block.keys.add(key)\n self.mapping[key] = new_block\n\n if not cur_block.keys:\n cur_block.remove()", "def eliminate(self):\n deleteKey = []\n for key,value in self._sets[self._currentSet].items():\n if value < self._minSupport:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._sets[self._currentSet][key]", "async def test_blocking_unblock(self):\n async def unblock():\n await asyncio.sleep(0.1)\n await self.redis.rpush('list', 'y')\n\n with await self.redis as r:\n task = self.loop.create_task(unblock())\n result = await r.blpop('list', timeout=1)\n assert result == [b'list', b'y']\n await task", "def block_one(self):", "def _pullchangeset(pullop):\n # We delay the open of the transaction as late as possible so we\n # don't open transaction for nothing or you break future useful\n # rollback call\n if b'changegroup' in pullop.stepsdone:\n return\n pullop.stepsdone.add(b'changegroup')\n if not pullop.fetch:\n pullop.repo.ui.status(_(b\"no changes found\\n\"))\n pullop.cgresult = 0\n return\n tr = pullop.gettransaction()\n if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:\n pullop.repo.ui.status(_(b\"requesting all changes\\n\"))\n elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):\n # issue1320, avoid a race if remote changed after discovery\n pullop.heads = pullop.rheads\n\n if pullop.remote.capable(b'getbundle'):\n # TODO: get bundlecaps from remote\n cg = pullop.remote.getbundle(\n b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads\n )\n elif pullop.heads is None:\n with pullop.remote.commandexecutor() as e:\n cg = e.callcommand(\n b'changegroup',\n {\n b'nodes': pullop.fetch,\n b'source': b'pull',\n },\n ).result()\n\n elif not pullop.remote.capable(b'changegroupsubset'):\n raise error.Abort(\n _(\n b\"partial pull cannot be done because \"\n b\"other repository doesn't support \"\n b\"changegroupsubset.\"\n )\n )\n else:\n with pullop.remote.commandexecutor() as e:\n cg = e.callcommand(\n b'changegroupsubset',\n {\n b'bases': pullop.fetch,\n b'heads': pullop.heads,\n b'source': b'pull',\n },\n ).result()\n\n bundleop = bundle2.applybundle(\n pullop.repo, cg, tr, b'pull', pullop.remote.url()\n )\n pullop.cgresult = bundle2.combinechangegroupresults(bundleop)", "def shiftDeque(d):\n while True:\n try:\n yield d.popleft()\n except IndexError:\n break", "def shrink_down(self, cidx, amt):\n left = amt # track unused shrink amount\n # for each client after specified index\n for idx in range(cidx + 1, len(self.relative_sizes)):\n # shrink by current total left-over amount\n left -= left - self._shrink(idx, left)\n # return unused shrink amount\n return left", "def toggle(collection: set[_T], item: _T | None) -> set[_T]:\n\n if item is None:\n return collection\n\n if item in collection:\n return collection - {item}\n else:\n return collection | {item}", "def block(self):\n\t\t\tself.blocked_timestamp = time.time()\n\t\t\tself.status = 'blocked'\n\t\t\tthe_blacklist.export_blocklist()", "def link_graph_and_safe_set(graph, safe_set):\n for node, next_node in graph.edges_iter():\n edge = graph[node][next_node]\n edge['safe'] = safe_set[node:node + 1, edge['action']]", "def greedy_shrink(self):\n while self.single_greedy_shrink_iteration():\n self.run_shrink_pass(\"lower_common_block_offset\")", "def 
blocks(self):\n pass", "def percolate_down(self, index):\n child = self.max_child(index)\n\n # swap if child is less than than current and continue percolating\n if child and self._data[child] < self._data[index]:\n self.swap(child, index)\n self.percolate_down(child)", "def setToDead(self, state=DeadState()):\n if len(self._lastStates) > 5:\n self._lastStates.pop(0)\n self._lastStates.append(self._state)\n self._state = state\n self._nextState = self._state\n return self._state" ]
[ "0.591464", "0.56556416", "0.5639226", "0.55734926", "0.5487892", "0.5392101", "0.5380977", "0.53320444", "0.53086597", "0.51837474", "0.5178055", "0.51443833", "0.5077661", "0.5057524", "0.5054124", "0.50266695", "0.49867174", "0.4948253", "0.49368462", "0.49265483", "0.49265483", "0.492263", "0.4913674", "0.490976", "0.4902604", "0.4898716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48914716", "0.48743564", "0.48658344", "0.48586646", "0.4846221", "0.48461786", "0.4832653", "0.48301437", "0.47956702", "0.47802758", "0.47721744", "0.476798", "0.4756594", "0.47507283", "0.47389957", "0.47389203", "0.47377175", "0.47306016", "0.47306016", "0.47306016", "0.47235352", "0.47216162", "0.47141305", "0.47046205", "0.46879593", "0.4686923", "0.4685871", "0.46822932", "0.46810618", "0.46810618", "0.46810618", "0.46763882", "0.46721935", "0.46707866", "0.46685585", "0.46609905", "0.4653876", "0.465281", "0.4648303", "0.4646027", "0.46455", "0.464325", "0.46418267", "0.46367142", "0.46256134", "0.46199957", "0.46195307", "0.46171123", "0.46155718", "0.4613528", "0.46117702", "0.46081218", "0.460457", "0.46006426", "0.45902705", "0.4588641", "0.45877716", "0.4583426", "0.4578909", "0.45781586", "0.45755947", "0.4574868" ]
0.56132346
3
Block up from a given set.
def block_up(self, frompoint):
    clause = [-(i + 1) for i in frompoint]
    self.add_clause(clause)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lift_up(self):\n\n # Can't reuse set_lift_pos due to bug above\n self.send(self.cmd.SET_LIFT_SET, self.cmd.SET_LIFT_SET[\"check\"][\"min\"])", "def bottom_up(self, safe=False):\n if safe:\n assert not self.cycle()\n discard = set()\n queue = deque(self.leaves())\n while queue:\n new = queue.popleft()\n if new.children() - discard:\n queue.append(new)\n else:\n discard.add(new)\n for parent in sorted(new.parents(), key=lambda x:x.nodeid):\n if not parent in discard and not parent in queue:\n queue.append(parent)\n yield new", "def set():", "async def plagueset(self, ctx):", "def _moveChainNode(self, chain, sets, buckets):\n #self._printBucketState() \n e = chain.pop()\n if e in self._locked:\n print \"error! can't move locked node.\"\n \n self.sets[sets[0]].remove(e)\n self.sets[sets[1]].add(e)\n #remove myself from my bucket\n self._buckets[e.biPartBuckets[0]][e.biPartChain[1]]\n #e.biPartBuckets = [e.biPartBuckets[1],e.biPartBuckets[0]]\n e.biPartBuckets.reverse()\n e.biPartSets.reverse()\n\n #e.biPartSets = [e.biPartSets[1],e.biPartSets[0]]\n\n #find new gain: (We can actually skip this step)\n #self._addToBucket(buckets[1], e, sets[1], sets[0])\n # add moved node to locked set\n self._locked.add(e)\n for n in (e.parents + e.children):\n self._updateBucketNode(n)\n\n #self._printBucketState() \n return", "def difference_update(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> None:\n self.discard(RangeSet._to_rangeset(rng_set))", "def test_heuristic_abort(self):\n graph = {}\n for u in self.complete:\n graph[u] = set()\n for v in self.complete[u]:\n if u != v: # ignore self-loop\n graph[u].add(v)\n next_node = min_fill_in_heuristic(graph)\n if next_node is None:\n pass\n else:\n assert False", "def block_seen(self):\n self.blocklist.update(self.mapping.values())\n self.mapping = dict()", "def move_lift_up():\n return _move_lift(1)", "def wait(self, *args):\n # TODO -- say something\n if self.finished_places == 7:\n self.finished_places += 1\n return super(Up, self).wait(*args)", "def level_up(self):\n pass", "def blocks(self):\n pass", "def test_set(self):\n a = set()\n a.add('b')\n a.add('c')\n a.add('a')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'b', 'c'])\n a.remove('b')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'c'])\n\n a.discard('d')\n\n b = set(['r', 's'])\n d = a.union(b)\n b = list(d)\n b.sort()\n self.assertEqual(b, ['a', 'c', 'r', 's'])", "def set():\n pass", "def link_graph_and_safe_set(graph, safe_set):\n for node, next_node in graph.edges_iter():\n edge = graph[node][next_node]\n edge['safe'] = safe_set[node:node + 1, edge['action']]", "def all_breakdown(self,toplevel=False,use_cache=True,getone=False, debug=False):\n todo_gates=self.gates.copy()\n breakdown_solutions=set()\n bat=0\n while todo_gates:\n if time.time()>self.timeout:\n return 'timeout'\n thisgate=todo_gates.pop()\n startpos,ns=thisgate\n self.set(startpos)\n self.gatesqs.remove(startpos)\n self.gates.remove(thisgate)\n dvs=[n[0] for n in [(0,(1,0)),(1,(0,1)),(2,(-1,0)),(3,(0,-1))] if not self.rows[startpos[1]+n[1][1]][startpos[0]+n[1][0]]]\n\n if ns==1:\n #(if it's a req gate)\n target_blocked=False\n indv=None\n for dv in dvs:\n if add(startpos,dv) in self.sqs:\n indv=dv\n break\n if indv==None:\n pass\n else:\n newdvs=[dv for dv in dvs if dv in [indv, (indv+1)%4, (indv-1)%4]]\n if newdvs!=dvs:\n dvs=newdvs\n #removes wrong way dvs totally!\n #adde the exception - it can occur when a room is reduced to just RR for example. 
not sure what to do then.\n for dv in dvs:\n dest=add(startpos,dv)\n if dest in self.sqs:\n if self.rows[dest[1]][dest[0]]==1:\n target_blocked=True\n break\n elif ns==2:\n dvs.append('skip')\n bat=0\n for indv in dvs:\n if indv=='skip':\n su=self.make_subrooms_from_current_state()[0]\n su.parent=self.hashid\n res=su.get_all_sols(use_cache=use_cache,getone=getone)\n if res=='timeout':\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return res\n if not res:\n continue\n if getone and res:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return True\n this=(startpos,'skip',(),(),startpos)\n offset_amount=(self.xoffset-su.xoffset,self.yoffset-su.yoffset)\n for sr in res:\n fixed_sr=()\n for srgate in sr:\n start,sindv,path,covered,end=srgate\n newsr=(offset(start,offset_amount),sindv,tuple(path),tuple(offset(covered,offset_amount)),offset(end,offset_amount),)\n fixed_sr=fixed_sr+(newsr,)\n fixed_sr=(this,)+fixed_sr\n breakdown_solutions.add(fixed_sr)\n continue\n continuations=self.go_until_deadend_with_path(startpos,indv,[])\n if continuations=='timeout':\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return 'timeout'\n for path,covered,stop in continuations:\n for g in self.gates:\n if g[0]==stop:\n endgate=g\n break\n self.gatesqs.remove(stop)\n self.gates.remove(endgate)\n #I have made my move. if room is done, return true.\n #if not done, try to solve my subrooms.\n\n self.set_many(covered)\n subrooms=self.make_subrooms_from_current_state()\n #it sucks that this makes them all... it should just make them one at a time. save a lot of work on big rooms.\n #90% of the time the smallest subroom is illegal\n #restore things to normal now; i made it out, and now the solution is up to the subrooms.\n self.gates.add(endgate)\n self.gatesqs.add(stop)\n self.unset_many(covered)\n initial_gate_exit=(startpos,indv,tuple(path),tuple(covered),stop,)\n partial_solutions=((initial_gate_exit,),)\n subrooms.sort(key=lambda x:len(x.gatesqs))\n subrooms_ok=True\n #actually the permut stuff is slightly off. we should find all permutations of sols within a room, not just perms of subrooms .\n if time.time()>self.timeout:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return 'timeout'\n subrooms_ok=True\n subroomreses={}\n for su in subrooms:\n if time.time()>self.timeout:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return 'timeout'\n subroomres=su.get_all_sols(toplevel=False,use_cache=use_cache,getone=getone)\n #SHOULD use cache here - just temp\n if subroomres=='timeout':\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n #resetting this stuff IS necessary!\n return 'timeout'\n\n #this is a bit weird. if one subroom times out it doesn't mean the others won'1! it's still possible\n #to get negative solutions from this point on - but no positive.\n #this res should be put in terms of the outer room, not this guy's room.\n #su's got absolute x,yoffsets, same as parent. but sols are coming in su's coord system. to convert to parent\n if getone and subroomres:\n continue\n if not subroomres:\n #that is, only if room definitely doesnt have a sol.\n subrooms_ok=False\n break\n subroomreses[su.hashid]=self.fix_subroomres(su, subroomres)\n if not subrooms_ok:\n #if this set of subrooms doesn't work just die. 
none of the others will work either.\n continue\n if getone:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return True\n #for each subroom order, choose one solution. then make one sol out of all the perms of that.\n if subrooms:\n aa=self.all_orderings_of_subroomreses(subroomreses)\n #and here we make all permutations of all the subrooms.\n including_this_subroom_partials=set()\n for partial in partial_solutions:\n for ii,a in enumerate(aa):\n if ii%128==0 and time.time()>self.timeout:\n self.gates.add(thisgate);self.gatesqs.add(startpos);self.unset(startpos);return 'timeout'\n if a=='timeout':\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return 'timeout'\n combined=tuple(partial[:])+tuple(a[:])\n including_this_subroom_partials.add(combined)\n partial_solutions=including_this_subroom_partials\n #now you are done with this first stop/covered, are at another exit, and no subroom has failed. so, good on ya!\n if getone:\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n return True\n breakdown_solutions.update(partial_solutions)\n if breakdown_solutions:\n g_hassols_cache[self.key]=True\n self.gates.add(thisgate)\n self.gatesqs.add(startpos)\n self.unset(startpos)\n if getone:\n if not breakdown_solutions:\n return False\n return list(breakdown_solutions)", "def move_up(g,k): # g: graph; k: coefficient\n for i,_ in path(g): #i: node address\n if (i%k)!=0:\n move_up_node(g,i,k)", "def open_up(self):\n\n self.move(self.__max_step__)", "def normalize(self):\n blocks = set(self.blocks)\n queue = set([self.entry_point])\n visited = set()\n while queue:\n root = queue.pop()\n visited.add(root)\n for child in root.children:\n if child not in visited:\n queue.add(child)\n unreachable = blocks - visited\n for block in unreachable:\n block.detach()\n visited.remove(self.entry_point)\n for block in visited:\n if block.empty():\n for parent in block.parents: # Re-parent\n for child in block.children:\n parent.add_child(child)\n block.detach()\n unreachable.add(block)\n blocks -= unreachable\n self.blocks = [block for block in self.blocks if block in blocks]", "def bring_down(self):\n\n self.move(self.__min_step__)", "def unblock_all(t):\n blocked_count = 0\n\n while True:\n blocked_user_ids = t.blocks.ids()[\"ids\"]\n if not blocked_user_ids:\n print(\"No more IDs to unblock\")\n break\n\n for user_id in blocked_user_ids:\n blocked_count = blocked_count + 1\n print(f\"{blocked_count}: {user_id}\")\n try:\n t.blocks.destroy(user_id=user_id, include_entities=False, skip_status=True)\n except:\n print(\"error\")", "def _forward_local_setpoints(self):\n # ToDo: Move this into the subscriber callback\n # ToDo: If no new setpoints are incoming, set to loiter\n t = threading.current_thread()\n # Wait until the local setpoint are set\n while self.local_setpoint is None and getattr(t, \"do_run\", True):\n self._rate_publish.sleep()\n\n while not rospy.is_shutdown() and getattr(t, \"do_run\", True):\n self._pub_setpoint.publish(self.local_setpoint)\n self._rate_publish.sleep()\n return", "def leftUp(self):", "def collect_set(self, pidset):\r\n self.clear(pidset)\r\n self._process_lines(self._collect_set(pidset))", "def up(self):\n if self.top == self.current:\n return\n else:\n self.current += 1", "def move_up(self):\n self.move_step(-1)", "def restrict(self):\n calls = []\n while self[1:] not in calls:\n calls.append(self[1:])\n self.restrict_once()", "def set_blacklist(self):\n\n for name in self.__ipset:\n if 
self.verbose:\n print(\"Start create: \" + self.__ipset[name]['ipset-name'])\n\n # create ipset\n self.__process(name, self.__parser.create(name))\n\n if self.verbose:\n print('Done')", "def block_down(self, frompoint):\n comp = self.complement(frompoint)\n clause = [(i + 1) for i in comp]\n self.add_clause(clause)", "def _move_up(self, p):\n if p != self.data.first():\n self.data.add_first(self.data.delete(p))", "def complement(self, aset):\n return self.all_n.difference(aset)", "def _move_up(self, exclude=None):\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)", "def lift_down(self):\n\n # Can't reuse set_lift_pos due to bug above\n bottom_limit = self.get_lift_limit()\n self.send(self.cmd.SET_LIFT_SET, bottom_limit)", "def uploads(self, requests, peers, history):\n\n round = history.current_round()\n logging.debug(\"%s again. It's round %d.\" % (\n self.id, round))\n # One could look at other stuff in the history too here.\n # For example, history.downloads[round-1] (if round != 0, of course)\n # has a list of Download objects for each Download to this peer in\n # the previous round.\n\n chosen = []\n bws = []\n if len(requests) == 0:\n logging.debug(\"No one wants my pieces!\")\n else:\n if round == 0:\n chosen = [request.requester_id for request in requests]\n bws = even_split(self.up_bw, len(chosen))\n else:\n requester_ids = [request.requester_id for request in requests]\n # requester_id : blocks given to us in last round\n last_dls = {}\n # find peers who unchoked me and update\n for dl in history.downloads[round-1]:\n # update peer with observed flow from peer if peer is a requester\n if dl.from_id in requester_ids:\n last_dls[dl.from_id] = dl.blocks\n\n # smallest to largest\n sorted_ids = sorted(last_dls, key=lambda k: last_dls[k], reverse=False)\n #if len(sorted_ids) > 3:\n # sorted_ids = sorted_ids[:2]\n total_dl = sum([last_dls[k] for k in sorted_ids])\n\n for chosen_peer in sorted_ids:\n chosen.append(chosen_peer)\n ratio = float(last_dls[chosen_peer])/float(total_dl)\n bw = ratio*self.percentage*self.up_bw\n bws.append(bw)\n\n others = list(set(requester_ids) - set(sorted_ids))\n if len(others) > 0:\n optimistic = random.choice(others)\n chosen.append(optimistic)\n bws.append(self.up_bw-sum(bws))\n\n # create actual uploads out of the list of peer ids and bandwidths\n uploads = [Upload(self.id, peer_id, bw)\n for (peer_id, bw) in zip(chosen, bws)]\n \n return uploads", "def block_one(self):", "def unlock_nodes(set_name):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.unlock_nodes\")\n\n if mc.objExists(set_name):\n for o in mc.sets(set_name, query=True):\n if mc.lockNode(o, query=True):\n flg.info(\"Unlocking {}\".format(o))\n mc.lockNode(o, lock=False)\n else:\n flg.warning(\"Set, {}, does not exist\".format(set_name))", "def setUP(self):\r\n print(\"=====Begin test=====\")", "def normalize(self):\n queue = {self.entry_point}\n visited = set()\n while queue:\n root = queue.pop()\n visited.add(root)\n for child in root.children:\n if child not in visited:\n queue.add(child)\n unreachable = self.blocks - visited\n for block in unreachable:\n block.detach()\n visited.remove(self.entry_point)\n for block in visited:\n if block.empty():\n for parent in block.parents: # Re-parent\n for child in block.children:\n parent.add_child(child)\n block.detach()\n unreachable.add(block)\n self.blocks -= unreachable", "def _straight(self):\r\n \r\n flow = min(self.upstream.demand, self.downstream.supply)\r\n self.upstream.outflow = flow\r\n 
self.downstream.inflow = flow", "def _swim(self):\n child_ix = len(self) - 1\n parent_ix = self._get_parent(child_ix)\n while (parent_ix is not None and self._test(parent_ix, child_ix)):\n self._exch(parent_ix, child_ix)\n child_ix = parent_ix\n parent_ix = self._get_parent(parent_ix)", "def mine(self, block):\r\n for n in range(self.maxNonce):\r\n if int(block.generate_hash(), 16) <= self.chain.targetHash:\r\n self.chain.add(block)\r\n break\r\n else:\r\n block.nonce += 1", "def take(self):\n print(\"You fill the kettle with water.\")\n inventory.remove('kettle')\n collect('filled kettle')", "def try_moves(self, moves_set):\n for choice in tuple(moves_set):\n self.game.move(choice)\n self.game.board.create_layout()", "def greedy_shrink(self):\n while self.single_greedy_shrink_iteration():\n self.run_shrink_pass(\"lower_common_block_offset\")", "def move_up(self):\n\t\treturn self._move(up=True)", "def _move_up(self, p):\n if p == self._data.first():\n count = p.element()._count\n walk = self._data.before(p)\n if count > walk.element()._count: # must shift forward\n while (walk != self._data.first() and\n count > self._data.before(walk).element()._count):\n walk = self._data.before(walk)\n\n self._data.add_before(walk, self._data.delete(p)) # delete/reinsert", "def set_blockages(self, blockages, value=True):\n self.rg.set_blocked(blockages, value)", "def is_stuck(self, Set, list_of_grids):\n\t\n\t\tif self.Bubble_initial_pos[1] <= self.Bubble_radius:\n\t\t\tself.min_dist_grid(list_of_grids, self.Bubble_initial_pos)\n\t\t\tself.Bubble_vel = [0, 0]\n\t\t\tSet.add(self)\n\t\t\treturn True\n\n\t\tfor ball in set(Set):\n\t\t\tif dist(self.Bubble_initial_pos, ball.Bubble_initial_pos) <= (2.0*self.Bubble_radius):\n\t\t\t\tself.min_dist_grid(list_of_grids, self.Bubble_initial_pos) \n\t\t\t\tself.Bubble_vel = [0, 0]\n\t\t\t\tSet.add(self)\n\t\t\t\treturn True\n\n\t\treturn False", "def block_candset(self, candset, verbose=False, show_progress=True,\n n_chunks=1):\n logger.warning(\n \"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. 
USE AT YOUR OWN RISK.\")\n\n # validate data types of input parameters\n self.validate_types_params_candset(candset, verbose, show_progress,\n n_chunks)\n\n # get and validate metadata\n log_info(logger, 'Required metadata: cand.set key, fk ltable, ' +\n 'fk rtable, ltable, rtable, ltable key, rtable key', verbose)\n\n # # get metadata\n key, fk_ltable, fk_rtable, ltable, rtable, l_key, r_key = cm.get_metadata_for_candset(\n candset, logger, verbose)\n\n # # validate metadata\n cm._validate_metadata_for_candset(candset, key, fk_ltable, fk_rtable,\n ltable, rtable, l_key, r_key,\n logger, verbose)\n\n # validate rules\n assert len(self.rules.keys()) > 0, 'There are no rules to apply'\n\n # validate n_chunks parameter\n validate_object_type(n_chunks, int, 'Parameter n_chunks')\n validate_chunks(n_chunks)\n\n n_chunks = get_num_partitions(n_chunks, len(candset))\n # do blocking\n\n # # initialize the progress bar\n # if show_progress:\n # bar = pyprind.ProgBar(len(candset))\n\n # # set index for convenience\n l_df = ltable.set_index(l_key, drop=False)\n r_df = rtable.set_index(r_key, drop=False)\n\n # # get attributes to project\n l_proj_attrs, r_proj_attrs = self.get_attrs_to_project(l_key, r_key,\n [], [])\n l_df, r_df = l_df[l_proj_attrs], r_df[r_proj_attrs]\n\n c_df = self.block_candset_excluding_rule(candset, l_df, r_df, l_key,\n r_key,\n fk_ltable, fk_rtable, None,\n show_progress, n_chunks)\n\n # update catalog\n cm.set_candset_properties(c_df, key, fk_ltable, fk_rtable, ltable,\n rtable)\n\n # return candidate set\n return c_df", "def eliminate_left_recursion(self,stop_at_set):\n assert self.is_canonical\n\n # Determine a definite ordering of the rules.\n # Use a DFS so we only have essential backedges.\n preorder_names = self.preorder()\n preorder_index = dict()\n for name in preorder_names:\n preorder_index[name] = len(preorder_index)\n\n # Break self-cycles via more than one step\n for i in range(1,len(preorder_names)):\n rule_name = preorder_names[i]\n if rule_name in stop_at_set:\n continue\n rule = self.rules[rule_name]\n replacement = []\n changed = False\n for rhs in rule.as_container():\n phrase = rhs.as_container()\n first = phrase[0]\n rest = phrase[1:]\n if first.is_symbol_name():\n first_name = first.content\n j = preorder_index[first_name]\n if (j < i) and (first_name not in stop_at_set):\n # Break this backedge\n Aj = self.rules[first_name].as_container()\n if len(rest) == 0:\n # Add Aj's alternatives to Ai's alternatives.\n # Aj is a choice node\n # The elements of Aj are already of suitable class.\n replacement.extend([delta for delta in Aj])\n else:\n # Rest is non-empty\n for delta in Aj:\n replacement.append(self.MakeSeq(list_without_empty(delta.as_container()) + rest))\n changed = True\n else:\n # Pass it through. It's not a backedge, or we've been\n # asked to stop here.\n replacement.append(rhs)\n else:\n # First thing is not a symbol name. 
Pass it through\n replacement.append(rhs)\n if changed:\n # Update with the new rule.\n self.rules[rule_name] = self.MakeChoice(replacement)\n\n # Finally, eliminate immediate left recursion.\n self.eliminate_immediate_recursion(self)", "def doAllIn(self):\n self.doRaise(self.avatar.getChips())", "def box_it_up(self):\n for k in self:\n _conversion_checks(k, self.keys(), self._box_config, check_only=True)\n if self[k] is not self and hasattr(self[k], 'box_it_up'):\n self[k].box_it_up()", "def rollback(self, mapset=None):\n if mapset is None:\n mapset = self.current_mapset", "def add_to_queue(self, moves_set):\n new_moves = self.game.find_moves(moves_set[-1])\n for new_move in tuple(new_moves):\n self.queue.append(moves_set + [new_move])", "def _computebumpedset(repo):\n # get all possible bumped changesets\n tonode = repo.changelog.node\n publicnodes = (tonode(r) for r in repo.revs('public()'))\n successors = allsuccessors(repo.obsstore, publicnodes,\n ignoreflags=bumpedfix)\n # revision public or already obsolete don't count as bumped\n query = '%ld - obsolete() - public()'\n return set(repo.revs(query, _knownrevs(repo, successors)))", "def power_set(sett):\n\n powerset_so_far = {frozenset()}\n\n for element in sett:\n set.update(powerset_so_far,\\\n extend_all(element, powerset_so_far))\n \n return powerset_so_far", "def Set(self) -> None:", "def reverse_moves(self, moves_set):\n moves_set.reverse()\n for choice in tuple(moves_set):\n self.game.move([choice[0], -choice[1]])", "def BlockNextIteration(self):\n self._start_lock.clear()", "def up(self):\n self.move(0, 1)", "def complete(self):\n calls = []\n while self[1:] not in calls:\n calls.append(self[1:])\n self.complete_once()", "def ramp_down(self) -> None:\n for stock in self.stocks:\n if stock.are_any_shares_owned():\n self.cash_balance = stock.sell(-1, self.cash_balance, self.buy_budget)", "def test_block_missing_batch(self):\n pass", "def shuffle_up(self):\n self.clients.shuffle_up()\n self.group.layout_all()\n self.group.focus(self.clients.current_client)", "def unpunch(self):\n self.punching = 0", "def up(self, i):\n pass", "def go_up(self):\n\t\tself.x,self.y = Mario._get_coordinates(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\tMario._pass(self.x,self.y)\n\t\ttemp = self.x\n\t\tflag = 0\n\t\twhile(temp>=self.x-8):\n\t\t\tif(Board.board[temp][self.y] in obstacles):\n\t\t\t\tflag = 1\n\t\t\t\ttemp_x = temp+1\n\t\t\t\tbreak\n\t\t\ttemp = temp-1\n\n\t\tif(not flag):\n\t\t\ttemp_x = self.x-8\n\n\t\tif Board.board[temp_x-1][self.y]=='B':\n\t\t\tnew = self.y\n\t\t\tfor i in range(new-4,new+5):\n\t\t\t\tif Board.board[temp_x-1][i]=='B':\n\t\t\t\t\tBoard.board[temp_x-1][i]='T'\n\t\t\tMario.bonus+=50\n\t\t\tif self.y==229 or self.y ==230 or self.y==231:\n\t\t\t\tBoard.board[23][230]='P'\n\n\n\t\tBoard.board[temp_x][self.y] = 'M'\t\t\n\t\tos.system('clear')\n\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)", "def miner_lock_blocks(self) -> int:", "def block_active(self, x):", "def block(self, tree, factors):\n # first we apply strip mining to the loops given in factors\n for x in range(len(factors)):\n\n # we may want to not block a particular loop, e.g. 
when doing Rivera/Tseng blocking\n if factors[x] > 1:\n tree = StencilCacheBlocker.StripMineLoopByIndex(x*2, factors[x]).visit(tree)\n\n # now we move all the outer strip-mined loops to be outermost\n for x in range(1, len(factors)):\n if factors[x] > 1:\n tree = self.bubble(tree, 2*x, x)\n\n return tree", "def limit_possible(self, *values):\n self.possible.intersection_update(values)", "def _move_up(self, p):\n if p != self._data.first():\n self._data.add_first(self._data.delete(p)) # remove or delete it from initial place and reinsert in new position", "def move_up(self, lifting, **kwargs):\n self.log.debug(\"Moving table up by {!s} microns\".format(lifting))\n if not self.variables[\"Table_state\"]:\n success = self.move_to([0, 0, lifting], False, 0, True, **kwargs)\n if success:\n self.variables[\"Table_state\"] = True # true means up\n return success\n else:\n self.queue.put({\"Info\": \"Table already in the up position...\"})\n return True", "def make_set_cover_pos(gRNA_hits, num_sets = 1, target_ids = [], algorithm = \"LAR\",\n id_key = lambda x: x, tie_breaker = tie_break_first, suppress_warning = False):\n # exclude_seqs = set(str(s).upper() for s in exclude_seqs)\n # gRNA_coverage = {seq: hits for seq, hits in gRNA_hits.hits.items()\n # if str(seq).upper() not in exclude_seqs}\n gRNA_coverage = gRNA_hits.hits\n eliminated_gRNA = {}\n ## prepare target ids\n if not target_ids:\n target_ids = set(hit.target_id for hit in gRNA_hits.flatten_hits())\n else:\n target_ids = set(target_ids)\n ## selected set cover algorithm\n if algorithm in (\"LAR\", \"greedy\"):\n set_cover_algo = set_cover_LAR if algorithm == \"LAR\" else set_cover_greedy\n else:\n raise Exception(f\"Invalid algorithm name: '{algorithm}'\")\n def coverage_possible():\n return set(id_key(hit) for hits in gRNA_coverage.values() for hit in hits) >= set(target_ids)\n ## function to generate set covers\n def make_set_cover(restore = []):\n for grna in restore:\n gRNA_coverage[grna.seq] = eliminated_gRNA[grna.seq]\n if not coverage_possible():\n if not suppress_warning:\n print((\"\\nError: The provided gRNA sequences cannot cover all target sequences\"\n \" at least once.\\n\"))\n return []\n selected_grnas = set_cover_algo(gRNA_coverage, target_ids, id_key = id_key, tie_breaker = tie_breaker)\n ## remove selected gRNA from candidates, and covert to gRNA object\n output = []\n for grna_seq in selected_grnas:\n ## remove\n eliminated_gRNA[grna_seq] = gRNA_coverage[grna_seq]\n del gRNA_coverage[grna_seq]\n ## convert gRNA sequences to gRNA object\n grna_seq_obj = gRNA_hits.get_gRNAseq_by_seq(grna_seq)\n output.append(gRNA(grna_seq_obj.id, grna_seq_obj))\n return output\n return make_set_cover", "def set_of_moves_b(tk, max_count=0):\n # Choose an orientation. Repeat if no possible move. 
Do this even\n # on first entry, just in case someone else has been drawing solid\n # lines.\n this_increment = 2 # Hard coded for clarity - must be < MIN_MOVE\n yield_count = 0\n yield_increment = 1 if max_count else 0\n available_moves = range(len(POSSIBLE_ROTATION))\n target_distance = random.randint(MIN_MOVE, MAX_MOVE)\n heading = tk.heading()\n while len(available_moves) and yield_count <= max_count:\n head_index = random.choice(available_moves)\n rotation = POSSIBLE_ROTATION[head_index]\n del available_moves[available_moves.index(head_index)]\n tk.setheading(heading) # Back to starting direction\n tk.left(rotation) # And try this candidate\n offered_distance = available_distance(tk, target_distance)\n if offered_distance >= MIN_MOVE:\n # Found a valid move\n yield_count += yield_increment\n yield tk.heading(), offered_distance\n # Back in after processing - reset choices\n available_moves = range(len(POSSIBLE_ROTATION))\n target_distance = random.randint(MIN_MOVE, MAX_MOVE)\n heading = tk.heading()\n #+--\n raise StopIteration", "def release_set(self):\n self._release_locks()\n if self._locks: # pragma: nocover\n # This shouldn't happen, it means we couldn't release our\n # locks, abort\n self._fail_out()\n return\n else:\n with self._state_change:\n if self.failed:\n return\n self.state = PartitionState.ALLOCATING\n self._child_watching(self._allocate_transition, async=True)", "async def process_changes(config, chains, allow=[], block=[]):\n allow_blocks = []\n if allow or block:\n print(\"Processing upstream change-set (%u entries)\" % (len(allow) + len(block)))\n processed = 0\n # First process any new allows\n if allow and chains: # If we have an INPUT default chain, refresh before unblock.\n await chains[0].refresh()\n\n for entry in allow:\n ip = entry[\"ip\"]\n if ip:\n if \"/\" in ip:\n as_block = netaddr.IPNetwork(ip)\n else:\n if \":\" in ip:\n as_block = netaddr.IPNetwork(\"%s/128\" % ip) # IPv6\n else:\n as_block = netaddr.IPNetwork(\"%s/32\" % ip) # IPv4\n allow_blocks.append(as_block)\n rule, chain = find_block(chains, as_block)\n while rule:\n print(\"Removing %s from block list (found at line %s as %s)\" % (ip, rule.line_number, rule.source))\n rv = await chain.remove(rule)\n if not rv:\n print(\"Could not remove ban for %s from iptables!\" % ip)\n else:\n rule, chain = find_block(chains, as_block)\n\n # Then process blocks\n for entry in block:\n ip = entry[\"ip\"]\n if ip:\n processed += 1\n if (processed % 500) == 0:\n print(\"Processed %u entries...\" % processed)\n # Only apply blocks if host is * or our specific name\n if entry.get('host', '*') not in [config['whoami'], '*']:\n continue\n banit = True\n if \"/\" in ip:\n as_block = netaddr.IPNetwork(ip)\n # We never ban larger than a /8 on ipv4 and /56 on ipv6\n if (as_block.version == 4 and as_block.size > MAX_BLOCK_SIZE_IPV4) or (\n as_block.version == 6 and as_block.size > MAX_BLOCK_SIZE_IPV6\n ):\n print(\"%s was requested banned but the net block is too large (%u IPs)\" % (as_block, as_block.size))\n continue\n else:\n if \":\" in ip:\n as_block = netaddr.IPNetwork(\"%s/128\" % ip) # IPv6\n else:\n as_block = netaddr.IPNetwork(\"%s/32\" % ip) # IPv4\n for wblock in allow_blocks:\n if as_block in wblock or wblock in as_block:\n print(\"%s was requested banned but %s is allow-listed, ignoring ban\" % (as_block, wblock))\n banit = False\n if banit:\n rule, chain = find_block(chains, as_block)\n if not rule:\n print(\"Adding %s to block list\" % ip)\n rv = await chains[0].add(ip, reason=entry[\"reason\"])\n if 
not rv:\n print(\"Could not add ban for %s in iptables!\" % ip)", "def clearListing(self, set: ghidra.program.model.address.AddressSetView) -> None:\n ...", "def lock_blocks(self) -> int:", "def betastar_from_pileup(self, bunch, target, betaratio=1):\n ip = self.clone()\n\n def ftosolve(beta):\n ip.betx = beta\n ip.bety = beta * betaratio\n return ip.pileup(bunch) - target\n\n res = scipy.optimize.root(ftosolve, ip.betx)\n if res.success:\n beta = res.x[0]\n ip.betx = beta\n ip.bety = beta * betaratio\n return ip\n else:\n print(res)\n raise ValueError(\"Could not find betastar\")", "def go_up(self, _: int = 0) -> None:\n if self.current_option > 0:\n self.current_option += -1\n else:\n self.current_option = self.last_item_index\n self.draw()", "def middleUp(self):", "def shield_up(screen, player):\n #Spawn the blocks\n screen.blocks = BlockGroup(screen.screen_width, screen.screen_height//1.2, screen.screen, 3, screen.player1.get_height() + 10)", "def test_heuristic_abort(self):\n graph = {}\n for u in self.complete:\n graph[u] = set()\n for v in self.complete[u]:\n if u != v: # ignore self-loop\n graph[u].add(v)\n\n deg_heuristic = MinDegreeHeuristic(graph)\n node = deg_heuristic.best_node(graph)\n if node is None:\n pass\n else:\n assert False", "def throttle_up( self ) ->(int,int):\n return None, None", "def uploads(self, requests, peers, history):\n \n\n round = history.current_round()\n # logging.debug(\"%s again. It's round %d.\" % (\n # self.id, round))\n\n # if no requests are made, then agent does not create any uploads\n if len(requests) == 0:\n return []\n \n # number of rounds to track in history to determine unchoke slots\n num_rounds_backtracking = 2\n num_unchoke_slots = int(math.sqrt(self.up_bw))\n\n # set of peers who get an unchoke slot\n unchoked_peers = set()\n\n # determine the list of peers who are requesting pieces from Agent\n requesting_peers = []\n for request in requests:\n if request.requester_id not in requesting_peers:\n requesting_peers.append(request.requester_id)\n\n \n # if round is less than 2 just randomly allocate unchoke slots, otherwise determine by highest download rate\n if (round < 2):\n chosen_peers = []\n if len(requesting_peers) >= num_unchoke_slots:\n chosen_peers = random.sample(requesting_peers,num_unchoke_slots)\n else:\n chosen_peers = requesting_peers\n for chosen_p in chosen_peers:\n unchoked_peers.add(chosen_p)\n\n else:\n # {peer: download_rate, .....}\n peer_by_download_rate_map = findPeerByDownloadRateInLastNRounds(\n num_rounds_backtracking, self, requesting_peers, history)\n\n # [(peer_id, download rate), ...] 
in descending order\n sorted_peer_by_download_rate = sorted(peer_by_download_rate_map.items(), key=lambda x:x[1], reverse=True)\n\n # find top 3 peers and their download rate\n for peer_id, download_rate in sorted_peer_by_download_rate[:num_unchoke_slots]:\n unchoked_peers.add(peer_id)\n\n # every 4th round, optimistically unchoke a peer that is not one of the top 3 peers\n if (round > 0 and round % 3 == 0 and len(requesting_peers) > len(unchoked_peers)):\n self.optimistically_unchoked_peer = random.choice(requesting_peers)\n while (self.optimistically_unchoked_peer in unchoked_peers):\n self.optimistically_unchoked_peer = random.choice(requesting_peers)\n unchoked_peers.add(self.optimistically_unchoked_peer) \n elif (self.optimistically_unchoked_peer != None):\n unchoked_peers.add(self.optimistically_unchoked_peer)\n \n bws = []\n if len(unchoked_peers) > 0:\n bws = even_split(self.up_bw, len(unchoked_peers))\n else:\n # don't allocate bandwidth if no peers are unchoked\n bws = [0 for _ in range (len(unchoked_peers))]\n\n uploads = [Upload(self.id, peer_id, bw)\n for (peer_id, bw) in zip(unchoked_peers, bws)]\n\n return uploads", "def walk_up(self, sound): \n \n # Checks if tile below Player is free, and if they are not in an animation cycle\n if (self.__maze_arrangement[self.__user_x][self.__user_y - 1] != 1) and not self.__animating:\n \n # Sets Player direction to up, animating state to true, moves the Player upwards\n # by one tile, and plays the walking sound effect \n self.__direction = \"UP\"\n self.__animating = True\n self.__user_y -= 1\n sound.play()", "def _update_setpoint(self, *args, value: Any, **kwargs) -> None:\n self._last_setpoint = value\n # Always set done to False when a move is requested\n # This means we always get a rising edge when finished moving\n # Even if the move distance is under our done moving tolerance\n self.done.put(0, internal=True)\n self._update_done()", "def next(self):\n if not self.minimal:\n self.sets, self.unexplored = subsampled_expand(\n self.sets,\n self.max_num_sets_to_expand,\n self.unexplored,\n self.explored,\n self.features,\n self.partitions,\n self.seed)\n while self.sets:\n self.minimal, self.explored, self.infeasible = \\\n minimal_set_exploration(self.sets,\n self.unexplored,\n self.explored,\n self.infeasible,\n self.partitions)\n if self.minimal:\n break\n else:\n self.sets, self.unexplored = subsampled_expand(\n self.sets,\n self.max_num_sets_to_expand,\n self.unexplored,\n self.explored,\n self.features,\n self.partitions,\n self.seed)\n if not self.minimal:\n raise StopIteration()\n return self.minimal.pop()", "def check_powerups(self):\n for powerup in self.pjs.powerups:\n block = powerup.rects[0]\n if block.overlap(self.rects[0]):\n self.eat(powerup)", "def createUpPyramidSets(blocksize,operating):\n sets = tuple()\n ul = blocksize[0]-operating\n dl = operating\n while ul > dl:\n r = numpy.arange(dl,ul,1)\n sets+=(tuple(product(r,r)),)\n dl+=operating\n ul-=operating\n return sets", "def sift_up(heap, start, end):\n # Swap last node with parents until no longer greater.\n i = end - 1\n heaped = False\n while i > start and not heaped:\n parent = (i - 1) // 2\n if compare(heap[i], heap[parent]) > 0:\n heap[i], heap[parent] = heap[parent], heap[i]\n i = parent\n else:\n heaped = True", "def ping_many_updown_iter(self, hosts):\n raise NotImplementedError()", "def sit(self):\n self.sit_out = False", "def percolate_up(self, index):\n if self.min is True:\n parent = self._parent(index)\n if index > 0 and self._data[index] < 
self._data[parent]:\n self._swap(index, parent)\n self.percolate_up(parent)\n if self.min is False:\n parent = self._parent(index)\n if index > 0 and self._data[index] > self._data[parent]:\n self._swap(index, parent)\n self.percolate_up(parent)", "def shush(self):\n cancel_all()", "def bft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue \n q.enqueue(starting_vertex) # set enqueue with the starting vertex\n\n while q.size() > 0: # loop if the size is greater than 0\n v = q.dequeue() # dequeue and store \n\n if v not in visited: # if v has not in the set \n visited.add(v) # add v to the set \n print(v) \n # Then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[v]: # loop through neighbors \n q.enqueue(neighbor) # add each neighbor to the end of the que ", "def hoist_until(self,target_rule_name,stop_at_set):\n assert self.is_canonical\n\n\n def expand_first(grammar,rule):\n \"\"\"\n When rule is\n Seq(A rest)\n and A -> A1 | ... | An\n Return [ A1 rest | ... | An rest ]\n\n If Ai is epsilon, then its corresponding term is just 'rest'\n \"\"\"\n result = []\n # Hoist the rule for 'other' nonterminal.\n phrase = rule.as_container()\n first = phrase[0]\n assert first.is_symbol_name() and (first.content != target_rule_name)\n #print(\" elaborating rule for {} \".format(first.content))\n rest = phrase[1:]\n other_rule = self.rules[first.content]\n for other_rhs in other_rule.as_container():\n result.append(grammar.MakeSeq(list_without_empty(other_rhs.as_container()) + rest))\n return result\n\n\n # Process in reverse order to reduce duplication.\n order_of_attack = list(reversed(self.preorder()))\n keep_going = True\n ancestors = set()\n while keep_going:\n keep_going = False\n #print(\"hoisting worklist: {}\".format(\" \".join(order_of_attack)))\n\n for candidate_rule_name in order_of_attack:\n rule = self.rules[candidate_rule_name]\n #print(\"consider {}\".format(candidate_rule_name))\n (with_target_rule_name,other_rules,term,empty) = rule.partition(target_rule_name)\n #print(\" {} {} {} {}\".format(len(with_target_rule_name),len(other_rules),len(term), len(empty)))\n if len(with_target_rule_name) > 0 and len(other_rules) > 0:\n #print(\" need to hoist\")\n # Need to hoist\n replacement = with_target_rule_name\n for other in other_rules:\n replacement.extend(expand_first(self,other))\n replacement.extend(term)\n replacement.extend(empty)\n self.rules[candidate_rule_name] = self.MakeChoice(replacement)\n #print(\"setting {} to {}\".format(candidate_rule_name,str(self.rules[candidate_rule_name])))\n keep_going = True\n if candidate_rule_name not in stop_at_set:\n ancestors.add(candidate_rule_name)\n\n for candidate_rule_name in order_of_attack:\n for ancestor in ancestors:\n rule = self.rules[candidate_rule_name]\n (with_ancestor,other_rules,term,empty) = rule.partition(ancestor)\n #print(\" {} {} {} {}\".format(len(with_ancestor),len(other_rules),len(term), len(empty)))\n if len(with_ancestor) > 0:\n #print(\" expanding ancestor {}\".format(ancestor))\n replacement = []\n for a_rule in with_ancestor:\n replacement.extend(expand_first(self,a_rule))\n replacement.extend(other_rules)\n replacement.extend(term)\n replacement.extend(empty)\n self.rules[candidate_rule_name] = self.MakeChoice(replacement)\n #print(\"setting {} to {}\".format(candidate_rule_name,str(self.rules[candidate_rule_name])))\n keep_going = True", "def move_up(self):\n return self._move(up=True)", "def 
mark_manipulative(self, keys=frozenset()):\n transaction = self.current_transaction\n if transaction is None:\n return\n transaction.watch(keys)\n if not transaction.commit_phase:\n transaction.begin_commit()" ]
[ "0.5980374", "0.5502509", "0.54139936", "0.5321069", "0.519504", "0.51196235", "0.5108691", "0.51076066", "0.5094848", "0.5038757", "0.4986755", "0.49557516", "0.49549985", "0.49490726", "0.49489588", "0.4919223", "0.49130073", "0.49110323", "0.48967567", "0.48951796", "0.4895023", "0.4894499", "0.4871853", "0.4871552", "0.48527953", "0.48412135", "0.48384246", "0.48345187", "0.48221663", "0.4814549", "0.48138785", "0.48067996", "0.48030886", "0.4797101", "0.4795268", "0.47925162", "0.47743055", "0.47704545", "0.47651196", "0.47561383", "0.4737431", "0.47332478", "0.47307044", "0.4727711", "0.47276744", "0.47252586", "0.47250128", "0.47196078", "0.47129783", "0.47114006", "0.46995422", "0.46909612", "0.46901074", "0.46898624", "0.468251", "0.4681228", "0.4679301", "0.4678365", "0.46525544", "0.4644899", "0.46429884", "0.46314427", "0.46280324", "0.46181232", "0.46143547", "0.46070653", "0.4603776", "0.46030042", "0.45995578", "0.45950684", "0.4595054", "0.4589402", "0.45865604", "0.4585453", "0.4585411", "0.45827955", "0.4576874", "0.4575988", "0.45747647", "0.45703363", "0.45698765", "0.45681882", "0.45641488", "0.4564064", "0.4562369", "0.45618454", "0.45608512", "0.4554465", "0.45542446", "0.45536304", "0.45526868", "0.4547223", "0.4544801", "0.45390025", "0.45288107", "0.45282525", "0.45233217", "0.45130122", "0.4510995", "0.45108715" ]
0.5053529
9
Settings for a step sequence
def __init__(self, step_time, step=None):
    self.step_vector = step
    self.step_time = step_time
    self.ref_timer = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_step(self):\n pass", "def configure_step(self):\n\n pass", "def _setVals(self, step=0):\n self.step = step", "def step(self, step=None):\n pass", "def step(self, **kwargs):\n pass", "def setValuesInStep(\n self, stepName: str, interactionProperty: str = \"\", contactControls: str = \"\"\n ):\n pass", "def setValuesInStep(\n self, stepName: str, interactionProperty: str = \"\", contactControls: str = \"\"\n ):\n pass", "def steps(self, steps):\n\n self._steps = steps", "def sequence_params(self):", "def set_first_machine_time_step(self, first_machine_time_step):", "def _step(self):\n pass", "def step(self):\n\n pass", "def step(self, state):", "def configure(self, settings={}):\n\n self.context.apply(settings)\n self.context.check('process.steps', [])\n self._index = None", "def setStepByStep(self, enabled):\r\n if enabled == True:\r\n LOG(\"Enabled ByStep\")\r\n self.stepByStep = True\r\n elif enabled == False:\r\n LOG(\"Disabled ByStep\")\r\n self.stepByStep = False\r\n #TODO: send notification EXECUTOR CONFIGURED\r", "def _step(self) -> None:", "def __init__(self, *args):\n \n self.steps = args", "def do_step(self) -> None:", "def setStepSize(self, step_size):\n assert isinstance(step_size, int)\n self.step_size = step_size\n self.step_directions = [np.array([i[0], i[1]]) for i in [(0,0),\n (0,step_size),\n (0,-step_size),\n (step_size, 0),\n (-step_size,0)]]", "def config_step_sweep(self):\n self.write(\":SOUR:FREQ:MODE SWE;\"\n \":SOUR:SWE:GEN STEP;\"\n \":SOUR:SWE:MODE AUTO;\")", "def prepare_step(self, i=None):\n\n if i is None:\n i = self.current_step_index\n\n # Setting step class and settings\n step_class = self.get_step_class_at_index(i)\n step_settings = self.get_step_settings_at_index(i)\n step_label = self.get_step_label_at_index(i)\n\n # Setting the temporary values\n self.current_step_tmp_vals = self.get_step_tmp_vals_at_index(i)\n\n # Update print\n if self.get_param_value('verbose'):\n print(f\"{self.name}, step {i} \"\n f\"({self.routine_template.step_name(index=i)}), preparing...\")\n qubits = step_settings.pop('qubits', self.qubits)\n dev = step_settings.pop('dev', self.dev)\n autocalib_settings = self.settings.copy(\n overwrite_dict=step_settings.pop('settings', {}))\n # Executing the step with corresponding settings\n if issubclass(step_class, qbcal.SingleQubitGateCalibExperiment) or \\\n issubclass(step_class, QuantumExperiment):\n step = step_class(qubits=qubits,\n routine=self,\n dev=dev,\n step_label=step_label,\n settings=autocalib_settings,\n **step_settings)\n elif issubclass(step_class, IntermediateStep):\n step = step_class(routine=self,\n dev=dev,\n step_label=step_label,\n qubits=qubits,\n autorun=False,\n settings=autocalib_settings,\n **step_settings)\n elif issubclass(step_class, AutomaticCalibrationRoutine):\n step = step_class(routine=self,\n dev=dev,\n step_label=step_label,\n qubits=qubits,\n autorun=False,\n settings=autocalib_settings,\n **step_settings)\n else:\n raise ValueError(f\"automatic subroutine is not compatible (yet)\"\n f\"with the current step class {step_class}\")\n self.current_step = step\n self.current_step_settings = step_settings", "def ChangeStep(self, up=1, down=1, left=1, right=1):\n self.stepUp = up\n self.stepDown = down\n self.stepLeft = left\n self.stepRight = right", "def getSteps():", "def _setup_next_sequence(cls):\n return 0", "def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True", "def setDirection(self,stepDir = 2):\n pass", "def 
SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))", "def set_step(self):\n super(Pdb, self).set_step()\n if hasattr(self, \"_set_trace_use_next\"):\n del self._set_trace_use_next\n self.set_next(self._via_set_trace_frame)", "def build_step(self):\n pass", "def build_step(self):\n pass", "def step(self, model):\n pass", "def step(self, model):\n pass", "def record(self, step):", "def step_index(self, step_index):\n\n self._step_index = step_index", "def __init__(self, set_bits):\n self.set_bits = set_bits\n self.current_step = 0", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def step(self, steps):\n self._simulate(endStep=self.currentStep+steps)", "def build_step(self):\n\n pass", "def __init__(self, routine, *args, **kwargs):\n self.kw = kwargs\n Step.__init__(self, routine=routine, *args, **kwargs)\n t1_settings = self.parse_settings(self.get_requested_settings())\n qbcal.T1.__init__(self, dev=self.dev, **t1_settings)", "def sequencePreparation(self):\n #Calculation of the number of frames in function of the duration + LED list for the acquisition\n if self.seqMode == \"rgbMode\":\n self._rgbSequenceInit()\n elif self.seqMode == 'rbMode':\n self._rbSequenceInit()\n else:\n print('Please select a valid mode of led sequence initialization')\n #Sending nb of frames to initialize the progress bar\n if type(self.nbFrames) == int:\n self.nbFramesSig.emit(self.nbFrames)\n\n print('acquisition Side : ', self.expRatio)\n #Saving the configuration of the experiment file (.json)\n self.savePath = cfgFileSaving(self.experimentName,\n self.nbFrames,\n self.duration,\n self.expRatio,\n self.acquMode,\n self.seqMode,\n self.rgbLedRatio,\n self.greenFrameInterval,\n round(1/self.cycleTime,2), #framerate\n self.folderPath,\n self.colorMode,\n self.mmc,\n 'Zyla') #WARNING > modulabilty (there is a way to get device label but it's not so easy)\n\n #initialization of the acquisition saving files : .tif (frames) and .txt (metadata)\n (self.tiffWriterList, self.textFile) = filesInit( self.savePath,\n self.experimentName,\n self.nbFrames,\n self.maxFrames)\n #send all informations to each LED driver\n self.arduinoSync()", "def stepStarted(build, step):", "def configure_stepper(self):\n self.logger.info('configurating stepper')\n if 'Z' in self.current_axis:\n self.anc350_instrument.configure_stepper('ZPiezoStepper', self.settings['amplitudeZ'] * ur('V'), self.settings['frequencyZ'] * ur('Hz'))\n else:\n self.anc350_instrument.configure_stepper('XPiezoStepper', self.settings['amplitudeX'] * ur('V'), self.settings['frequencyX'] * ur('Hz'))\n self.anc350_instrument.configure_stepper('YPiezoStepper', self.settings['amplitudeY'] * ur('V'), self.settings['frequencyY'] * ur('Hz'))\n\n self.gui.groupBox_actions.setObjectName(\"Colored_actions\")\n self.gui.groupBox_actions.setStyleSheet(\"QGroupBox#Colored_actions {border: 1px solid blue; border-radius: 9px;}\")\n\n self.gui.stackedWidgetMoving.setEnabled(True)\n\n self.get_move()", "def __init__(self, *args, **kwargs):\n Step.__init__(self, *args, **kwargs)\n\n # Configure all of the steps\n for key, val in self.step_defs.items():\n cfg = self.steps.get(key)\n if cfg is not None:\n new_step = val.from_config_section(\n cfg, parent=self, name=key,\n config_file=self.config_file)\n else:\n new_step = val(\n key, parent=self, config_file=self.config_file,\n **kwargs.get(key, {}))\n\n setattr(self, key, new_step)", "def set_step_conf(self, conf):\n return self.step_conf", "def __init__(self, step_time, 
step_interval=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.interval = step_interval\n self.step_time = step_time", "def step(self):\n raise NotImplementedError", "def __init__(self,\n steps,\n global_settings=None,\n routine=None,\n ):\n super().__init__(steps)\n\n if routine is not None:\n self.routine = routine\n\n if global_settings is not None:\n self.global_settings = global_settings\n else:\n self.global_settings = {}", "def __init__(self, num_id, name, redone, skipped, tasks):\n self.id = num_id\n self.name = name\n self.redone = redone\n self.skipped = skipped\n self.tasks = tasks\n super().__init__(Elements.STEP)", "def step(self):\r\n raise NotImplementedError", "def Move_Stage(self):\n for i in range(3):\n if self.set_pos[i] == 0:\n continue\n print \"Moving stage %s by %s steps\\n\"%(self.POS_NAME[i], self.set_pos[i])\n self.ser.write('F,C'+self.STEPPER_NAME[i]+str(self.set_pos[i])+',R')\n time.sleep(0.5)\n time.sleep(0.5)\n return", "def step_env(self):\n raise NotImplementedError\n # Not needed for this homework", "def setStepperMode(self, sMode):\n self.stepperMode = sMode", "def step_forward(self):", "def __init__(self):\n self.step_list = [steps.Raw()]", "def getCurrentStep():", "def step(self, action):", "def __init__(self, initial_value, n_values, schedule):\n self.step = 0.\n self.initial_value = initial_value\n self.nvalues = n_values\n self.schedule = SCHEDULES[schedule]", "def create_step(self, step):\n raise NotImplementedError", "def step(self, s, a):\n raise NotImplementedError", "def set_switching_params(self, config_table):\n self.set_number_switching_periods(config_table)\n if self.swtype == \"fsw\":\n for i in range(1, len(self.delta) + 1):\n sd = \"switchDeltas,{}\".format(i)\n self.seq.add_param(self.mng_name, sd, str(self.delta[i - 1]))\n else:\n self.seq.add_param(self.mng_name, \"switchDeltas,1\",\n str(self.delta))", "def train_step(self):\n pass", "def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.mu = mu\n self.sigma = sigma\n self.step_time = step_time", "def set_parameter_and_step(self, pname, value, nstep=2, warning_action=\"default\"):\n setattr(self.p, pname, value)\n with warnings.catch_warnings():\n warnings.simplefilter(warning_action)\n for _ in range(nstep):\n self.step()", "def SetAnimationStep(self, step):\r\n\r\n self._animation_step = float(step)", "def step(self):\n self.schedule.step()", "def setUp(self):\n self.t = True\n self.f = False\n self.value = 25", "def step(self):\n self.driver.step()", "async def configure_stepper(self, number: str, config: dict) -> \"StepperPlatformInterface\":\n raise NotImplementedError", "def setNumTimeSubSteps(*argv):", "def step(self, step=2, head_i=None):\n # choose model\n if self.ensemble is not None:\n model = self.ensemble\n elif head_i is not None:\n model = self.models[head_i]\n else:\n model = self.model\n\n # sequence input\n sequence = tf.keras.Input(shape=(self.seq_length, 4), name='sequence')\n\n # predict and step across positions\n preds = model(sequence)\n step_positions = np.arange(preds.shape[1], step=step)\n preds_step = tf.gather(preds, step_positions, axis=-2)\n model_step = tf.keras.Model(inputs=sequence, outputs=preds_step)\n\n # replace model\n if self.ensemble is not None:\n self.ensemble = model_step\n elif head_i is not None:\n self.models[head_i] = model_step\n else:\n self.model = model_step", "def _step(self) -> int:\n return self._config[CONF_STEP]", "def step(self):\n raise 
NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def test_set_single_step(self):\n self.server_widget.single_step = 25\n assert self.client_widget.single_step == self.server_widget.single_step", "def set_IStep(self, istep = [0], istep_sigma = [0], tstep = 5, tdur = 1e6, give_freq = True):\n if self.do_run:\n #for m in self.ic_steps:\n # m.destroy()\n # del m \n #del self.ic_steps\n \n #self.ic_steps = []\n \n istep = list(istep)\n neg = False\n \n for n in range(self.n_celltypes):\n \n if istep[n] < 0: \n neg = True\n istep[n] = abs(istep[n]) # make positive again\n \n if istep[n] != 0:\n if give_freq is True:\n a = np.array([istep[n]])\n iin = self.get_i(a, n)[0]\n if self.id == 0: print \"celltype: \", n, \" istep: \", istep[n], \"Hz => \", iin, \" nA\"\n istep[n] = iin \n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n np.random.seed(gid*30)\n \n if self.i_holdrs == []:\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n], istep[n]*istep_sigma[n], 1).clip(min=0)\n else: # same ihold for all cells!\n istep_r = istep[n]\n \n else: # ihold has been set!\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n]-self.i_holds[n], (istep[n]-self.i_holds[n])*istep_sigma[n], 1).clip(min=0) # delta now! put on top of hold!\n else: # same ihold for all cells!\n istep_r = istep[n]-self.i_holds[n] # delta now! put on top of hold!\n \n if neg:\n istep_r = -1*istep_r\n \n if istep[n] == 0:\n istep_r = -1*self.i_holdrs[n][i] \n \n #print 'is:' + str(istep_r) + 'was:' + str(self.i_holdrs[n][i])\n \n if istep_r != 0: \n # step current\n ic_step = h.IClamp(self.cells[n][i].soma(0.5))\n ic_step.delay = tstep/ms\n ic_step.dur = tdur/ms\n ic_step.amp = istep_r/nA\n self.ic_steps.append(ic_step)\n \n \n if self.id == 0: print \"set_IStep finished. 
istep: \", istep, \", istep_sigma: \", istep_sigma", "def update_all_step_settings(self, settings):\n for i, x in enumerate(self):\n self.update_settings_at_index(settings, index=i)", "def TestOneStep(self):\n pass", "def __init__(self, step_time, saw_time, step_interval=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.interval = step_interval\n self.step_time = step_time\n self.saw_time = saw_time", "def do_steps(self):\n steps = self.get_step_conf()\n all_step_config = dict()\n for k, v in steps.items():\n tmp_list = list()\n all_step_config[k] = tmp_list\n start = v[\"Start Value\"]\n end = v[\"End Value\"]\n # special handling of edge length\n if(k == \"Edge Length\"):\n start = self.convert_to_tuple(start)\n end = self.convert_to_tuple(end)\n tmp_list.append(str(start))\n while(start != end):\n start = self.add_edge_length(\n start, self.convert_to_tuple(v[\"Step\"]))\n tmp_list.append(str(start))\n print start\n else:\n tmp_list.append(float(start))\n while float(start) < float(end):\n start = float(start) + float(v[\"Step\"])\n tmp_list.append(start)\n return all_step_config", "def step(self, action):\n pass", "def step(self, action):\n pass", "def setup(self, i, t):\n \n self.t = t\n return i", "def increment_steps(self):\n self.num_steps += 1", "def setup_steps(self):\n step1 = ground_step.Ground(5745, 495, 40, 44)\n step2 = ground_step.Ground(5788, 452, 40, 44)\n step3 = ground_step.Ground(5831, 409, 40, 44)\n step4 = ground_step.Ground(5874, 366, 40, 176)\n\n step5 = ground_step.Ground(6001, 366, 40, 176)\n step6 = ground_step.Ground(6044, 408, 40, 40)\n step7 = ground_step.Ground(6087, 452, 40, 40)\n step8 = ground_step.Ground(6130, 495, 40, 40)\n\n step9 = ground_step.Ground(6345, 495, 40, 40)\n step10 = ground_step.Ground(6388, 452, 40, 40)\n step11 = ground_step.Ground(6431, 409, 40, 40)\n step12 = ground_step.Ground(6474, 366, 40, 40)\n step13 = ground_step.Ground(6517, 366, 40, 176)\n\n step14 = ground_step.Ground(6644, 366, 40, 176)\n step15 = ground_step.Ground(6687, 408, 40, 40)\n step16 = ground_step.Ground(6728, 452, 40, 40)\n step17 = ground_step.Ground(6771, 495, 40, 40)\n\n step18 = ground_step.Ground(7760, 495, 40, 40)\n step19 = ground_step.Ground(7803, 452, 40, 40)\n step20 = ground_step.Ground(7845, 409, 40, 40)\n step21 = ground_step.Ground(7888, 366, 40, 40)\n step22 = ground_step.Ground(7931, 323, 40, 40)\n step23 = ground_step.Ground(7974, 280, 40, 40)\n step24 = ground_step.Ground(8017, 237, 40, 40)\n step25 = ground_step.Ground(8060, 194, 40, 40)\n step26 = ground_step.Ground(8103, 194, 40, 360)\n\n step27 = ground_step.Ground(8488, 495, 40, 40)\n\n self.step_group = pygame.sprite.Group(step1, step2,\n step3, step4,\n step5, step6,\n step7, step8,\n step9, step10,\n step11, step12,\n step13, step14,\n step15, step16,\n step17, step18,\n step19, step20,\n step21, step22,\n step23, step24,\n step25, step26,\n step27)", "def set_options(self, options):\n self._set_steps(options.get('bounds', [(0,1)]), options.get('steps',2))", "def step(self, move):", "def step(self, action: np.ndarray) -> 'EnvStep':\n ...", "def __init__(self, asa_factory: AsaFactory):\n self.step_in_progress = False\n self.asa_factory = asa_factory", "def workflow_step(self, workflow_step):\n\n self._workflow_step = workflow_step", "def update_step_size(self):\n self.setSingleStep(10 ** self.step_exponent)\n self.update_format_string()", "def __init__(__self__, *,\n multistep_number: int,\n outcome_summary: str,\n run_duration: 'outputs.DurationResponse',\n 
step_id: str):\n pulumi.set(__self__, \"multistep_number\", multistep_number)\n pulumi.set(__self__, \"outcome_summary\", outcome_summary)\n pulumi.set(__self__, \"run_duration\", run_duration)\n pulumi.set(__self__, \"step_id\", step_id)", "def set_sequence(self, counter):\n self.seq_counter = counter", "def _set_steps(self, bounds, steps):\n if type(steps) == int:\n self.steps = [np.linspace(b1,b2,steps) for b1,b2 in bounds]\n elif type(steps) == list and type(steps[0]) == int:\n self.steps = [np.linspace(b1, b2, s) for (b1, b2), s in zip(bounds, steps)]\n else:\n self.steps = steps.copy()", "def configure_steps(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n # Set required defaults if not present\n if \"batch_size\" not in config:\n batch_size = 2 * jax.device_count()\n else:\n batch_size = config[\"batch_size\"]\n if \"num_epochs\" not in config:\n num_epochs = 10\n else:\n num_epochs = config[\"num_epochs\"]\n\n # Determine sharded vs. batch partition\n if batch_size % jax.device_count() > 0:\n raise ValueError(\"Batch size must be divisible by the number of devices\")\n self.local_batch_size: int = batch_size // jax.process_count()\n\n # Training steps\n self.steps_per_epoch: int = len_train // batch_size\n config[\"steps_per_epoch\"] = self.steps_per_epoch # needed for creating lr schedule\n self.num_steps: int = int(self.steps_per_epoch * num_epochs)\n\n # Evaluation (over testing set) steps\n num_validation_examples: int = len_test\n if \"steps_per_eval\" not in config:\n self.steps_per_eval: int = num_validation_examples // batch_size\n else:\n self.steps_per_eval = config[\"steps_per_eval\"]\n\n # Determine monitoring steps\n if \"steps_per_checkpoint\" not in config:\n self.steps_per_checkpoint: int = self.steps_per_epoch * 10\n else:\n self.steps_per_checkpoint = config[\"steps_per_checkpoint\"]\n\n if \"log_every_steps\" not in config:\n self.log_every_steps: int = self.steps_per_epoch * 20\n else:\n self.log_every_steps = config[\"log_every_steps\"]", "def set_current_pipeline_step(self, current_step):\n self.current_step = current_step\n current_pipeline = self._config['pipeline'][current_step]\n\n # Overwrite defaults with the current configuration\n self.pipeline_config = cascade_overwrite_dict(self._config['pipeline']['default'],\n self._config['pipeline'][current_step])\n\n self.model_object = cascade_overwrite_dict(self._config['model']['default'],\n self.pipeline_config['train']['model'])\n return self.pipeline_config", "def test_tstep(self):\n model = BDF(debug=None)\n\n sid = 42\n n1 = n2 = 5\n dt1 = dt2 = 0.1\n no1 = no2 = 3\n card = ['TSTEP', sid,\n n1, dt1, no1, None, None, None, None, None,\n n2, dt2, no2]\n model.add_card(card, card[0], comment='tstep comment')\n model.validate()\n tstep = model.tsteps[42]\n tstep.raw_fields()\n tstep.write_card()\n tstep.write_card(size=16)\n\n sid = 43\n N = 5\n DT = 0.1\n NO = 3\n tstep2 = model.add_tstep(sid, N, DT, NO)\n tstep2.raw_fields()\n tstep2.write_card()\n tstep2.write_card(size=16)\n save_load_deck(model)", "def _step(self, whence):\n pass", "def step_impl(context):\n pass", "def step_impl(context):\n pass", "def _setup(self) -> None:\n # Call base implementation\n super()._setup()\n\n # Configure the low-level integrator\n engine_options = self.simulator.engine.get_options()\n engine_options[\"stepper\"][\"iterMax\"] = 0\n engine_options[\"stepper\"][\"dtMax\"] = min(0.02, self.step_dt)\n engine_options[\"stepper\"][\"logInternalStepperSteps\"] = False\n\n # Set maximum computation time 
for single internal integration steps\n if self.debug:\n engine_options[\"stepper\"][\"timeout\"] = 0.0\n else:\n engine_options[\"stepper\"][\"timeout\"] = 2.0\n\n # Enable logging of geometries in debug mode\n if self.debug:\n engine_options[\"telemetry\"][\"isPersistent\"] = True\n\n # Update engine options\n self.simulator.engine.set_options(engine_options)\n\n # Set robot in neutral configuration\n qpos = self._neutral()\n framesForwardKinematics(\n self.robot.pinocchio_model, self.robot.pinocchio_data, qpos)" ]
[ "0.69822484", "0.69781184", "0.6863541", "0.65149957", "0.65120053", "0.6452456", "0.6452456", "0.64299405", "0.63822", "0.63449067", "0.6200995", "0.6192105", "0.6170368", "0.61481583", "0.6145377", "0.6126502", "0.60600954", "0.60401684", "0.60263544", "0.60128444", "0.59885585", "0.5987517", "0.5940866", "0.5881326", "0.5873912", "0.58426076", "0.5839174", "0.58390033", "0.582415", "0.582415", "0.58215195", "0.58215195", "0.5817701", "0.581012", "0.5778687", "0.57722694", "0.5771704", "0.57709587", "0.57517797", "0.57466644", "0.5738484", "0.57090884", "0.57071406", "0.5705569", "0.56984884", "0.56964093", "0.5694228", "0.56790966", "0.56784016", "0.5677703", "0.5674244", "0.5673798", "0.5660272", "0.5652968", "0.5632402", "0.56303793", "0.5630152", "0.56233335", "0.5621236", "0.559466", "0.5589965", "0.55884475", "0.5582165", "0.5571267", "0.55644757", "0.5561152", "0.55471283", "0.55468464", "0.5540935", "0.55408806", "0.55290514", "0.5524912", "0.5524912", "0.5524912", "0.551003", "0.5509748", "0.5498718", "0.54798263", "0.5479346", "0.54777426", "0.54775584", "0.54775584", "0.54747933", "0.5470802", "0.5468542", "0.54659545", "0.5448971", "0.5440044", "0.5432228", "0.54243267", "0.54065114", "0.5399815", "0.5389905", "0.53848433", "0.53830045", "0.53828764", "0.5381743", "0.53804535", "0.5372635", "0.5372635", "0.5372521" ]
0.0
-1
Generate a step signal sequence
def out(self, t: any, dim=(None, None)) -> any:
    u = np.zeros(shape=dim)
    j = 0
    for i in range(len(t)):
        if t[i] % self.step_time == 0 and t[i] != 0 and j + 1 != len(self.step_vector):
            j += 1
        u[i, :] = self.step_vector[j]
    return u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def sawtooth_factory(motor, start, stop, step_size):\n if stop < start:\n start, stop = stop, start\n\n num_pos = int((stop - start) // step_size)\n j = itertools.count()\n last_group = None\n\n def x_motion_per_step(dets, stream_name):\n nonlocal last_group\n if last_group is not None:\n yield from bps.wait(last_group)\n yield from bps.trigger_and_read(dets, stream_name)\n last_group = short_uid()\n target = start + step_size * (next(j) % num_pos)\n yield from bps.abs_set(motor, target, group=last_group)\n\n return x_motion_per_step", "def step(self, state):", "def step(self, s, a):\n raise NotImplementedError", "def step(self, d=1):\n raise NotImplementedError()", "def step_sequence_gen(track, click=False, fillvalue=0.0, t=None, srate=None):\n if t is None:\n t = time_gen(srate=srate)\n else:\n t = iter(t)\n t0 = next(t)\n t1 = t0\n duration = 1\n for tple in track:\n if hasattr(tple,'__getitem__'):\n value = tple[0]\n if len(tple) > 1:\n duration = tple[1]\n else:\n value = tple\n while t0 + duration > t1:\n yield value\n if click:\n value = fillvalue\n t1 = next(t)\n t0 = t1", "def gen_random_walk(self,n_step=100):\n # Warning about the small number of steps\n if n_step < 30:\n print(\"WARNING! The number of steps is small. It may not generate a good stochastic process sequence!\")\n \n w = np.ones(n_step)*self.x0\n \n for i in range(1,n_step):\n # Sampling from the Normal distribution with probability 1/2\n yi = np.random.choice([1,-1])\n # Weiner process\n w[i] = w[i-1]+(yi/np.sqrt(n_step))\n \n return w", "def _step(self) -> None:", "def record(self, step):", "def step(self, step=None):\n pass", "def sequence(context, data):\n number = data.get(\"number\", context.params.get(\"start\", 1))\n stop = context.params.get(\"stop\")\n step = context.params.get(\"step\", 1)\n delay = context.params.get(\"delay\")\n prefix = context.params.get(\"tag\")\n while True:\n tag = None if prefix is None else \"%s:%s\" % (prefix, number)\n\n if tag is None or not context.check_tag(tag):\n data[\"number\"] = number\n context.emit(data=data)\n\n if tag is not None:\n context.set_tag(tag, True)\n\n number = number + step\n if step > 0 and number >= stop:\n break\n if step < 0 and number <= stop:\n break\n\n if delay is not None:\n data[\"number\"] = number\n context.recurse(data=data, delay=delay)\n break", "def step(self, steps):\n self._simulate(endStep=self.currentStep+steps)", "def pyramid_factory(motor, start, stop, step_size):\n if stop < start:\n start, stop = stop, start\n last_group = None\n last_pos = start\n\n def x_motion_per_step(dets, stream_name):\n nonlocal last_group\n nonlocal last_pos\n nonlocal step_size\n\n if last_group is not None:\n yield from bps.wait(last_group)\n\n yield from bps.trigger_and_read(dets, stream_name)\n\n last_group = short_uid()\n\n if not start < last_pos + step_size < stop:\n step_size *= -1\n last_pos += step_size\n\n yield from bps.abs_set(motor, last_pos, group=last_group)\n\n return x_motion_per_step", "def step(self, dt_usec):\n\n # If we have no listeners, don't waste time calculating samples\n # @todo: Maybe calculate self.next_step so that we can add sensors during sim, but only if it turns out to be necessary\n if len(self.step_listeners) == 0:\n return\n \n # If the start of our next sample is greater than 1 (step), skip creating samples for this step\n if self.next_start >= 1.0:\n self.next_start -= 1\n return\n \n samples_per_step = self.sampling_rate * dt_usec / 1000000.\n 
sample_pct_of_step = 1.0/samples_per_step + 0.00000001 # For lerping -- add a tiny amount to eliminate floating point errors (doesn't affect the sim at this scale)\n\n self.step_lerp_pcts = np.arange(self.next_start, 1.0, sample_pct_of_step)\n\n # Call get_step_samples() (implemented in subclasses) to get the samples and add them to the buffer\n samples = self.create_step_samples(dt_usec) # Format np.array([<sample time>, <sample data 1>, ...])\n\n # Send our data to any attached listeners\n #self.logger.debug(\"Sending samples to {} step listeners\".format(len(self.step_listeners)))\n for step_listener in self.step_listeners:\n step_listener.step_callback(self, samples)\n\n # Update or start pct for the next step\n # @TODO: If we don't add .0000001 (or any tiny number, really) here the number of samples taken will be off by quite a bit at smaller step sizes. Probably floating point error....\n #self.next_start = sample_pct_of_step - (1 - self.step_lerp_pcts[-1]) +.0000001 # Works, but moved this to sample_pct_of_step calculation\n self.next_start = sample_pct_of_step - (1 - self.step_lerp_pcts[-1])", "def step(self, step=2, head_i=None):\n # choose model\n if self.ensemble is not None:\n model = self.ensemble\n elif head_i is not None:\n model = self.models[head_i]\n else:\n model = self.model\n\n # sequence input\n sequence = tf.keras.Input(shape=(self.seq_length, 4), name='sequence')\n\n # predict and step across positions\n preds = model(sequence)\n step_positions = np.arange(preds.shape[1], step=step)\n preds_step = tf.gather(preds, step_positions, axis=-2)\n model_step = tf.keras.Model(inputs=sequence, outputs=preds_step)\n\n # replace model\n if self.ensemble is not None:\n self.ensemble = model_step\n elif head_i is not None:\n self.models[head_i] = model_step\n else:\n self.model = model_step", "def x_next( self , x , u , t = 0 , dt = 0.1 , steps = 1 ):\n \n x_next = np.zeros(self.n) # k+1 State vector\n \n # Multiple integration steps\n for i in range(steps):\n \n x_next = self.f(x,u,t) * dt + x\n \n # Multiple steps\n x = x_next\n \n return x_next", "def create_step(self, step):\n raise NotImplementedError", "def step(amplitude, t_stop):\n times = np.array([0, t_stop/10, t_stop])\n amps = np.array([0, amplitude, amplitude])\n return times, amps", "def step(self):\r\n raise NotImplementedError", "def step(self, steps):\n nSteps = abs(steps)\n for s in xrange(0,nSteps):\n if (self.stepperMode==FULL_STEP):\n phase = s%4\n if (steps>0):\n self._fireSignal(self.A0,self.A1, self.fullStepCoilA[phase])\n self._fireSignal(self.B0,self.B1, self.fullStepCoilB[phase])\n else :\n self._fireSignal(self.A0,self.A1, self.fullStepCoilB[phase])\n self._fireSignal(self.B0,self.B1, self.fullStepCoilA[phase])\n sleep(self.delayLength)\n\n elif (self.stepperMode==HALF_STEP):\n phase = s%8\n if (steps>0):\n self._fireSignal(self.A0,self.A1, self.halfStepCoilA[phase])\n self._fireSignal(self.B0,self.B1, self.halfStepCoilB[phase])\n else :\n self._fireSignal(self.A0,self.A1, self.halfStepCoilB[phase])\n self._fireSignal(self.B0,self.B1, self.halfStepCoilA[phase])\n sleep(self.delayLength)", "def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add 
gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"", "def _EM_step(F,G,Y,dt):\n dW = np.sqrt(dt) * np.random.randn(Y.shape[0])\n Ynext = Y + F(Y) * dt + G(Y) * dW\n return Ynext", "def step(self):\n raise NotImplementedError", "def make_step_tensor(freq_signal):\n step_func = (freq_signal>=0).astype(np.int)*2 # wikipedia says that this should be 2x the original.\n step_func[freq_signal==0] = 0 # https://en.wikipedia.org/wiki/Analytic_signal (this shouldn't actually matter i think.\n step_tensor = tf.constant(step_func, dtype=tf.complex64)\n step_tensor = tf.expand_dims(step_tensor, 0)\n step_tensor = tf.expand_dims(step_tensor, 1)\n return step_tensor", "def simulationStep(self, step=0):\n self._message.queue.append(constants.CMD_SIMSTEP2)\n self._message.string += struct.pack(\"!BBi\", 1+1+4, constants.CMD_SIMSTEP2, step) \n result = self._sendExact()\n for module in self._modules.values():\n module.subscriptionResults.reset()\n numSubs = result.readInt()\n responses = []\n while numSubs > 0:\n responses.append(self._readSubscription(result))\n numSubs -= 1\n return responses", "def _step(self):\n pass", "def generate_seq(self):\n\n # Variable initialization\n eos = False\n c_s = 99\n x = []\n y = []\n\n while not eos:\n\n # Start of sequence\n if c_s == 99:\n # Sample from initial\n c_s = self.sample_p(self.proba[\"initial\"])\n\n # Consecutive iterations\n\n # We generate until we get length of self length\n elif len(x) < self.length:\n # Sample from transition of last state\n c_s = self.sample_p(self.proba[\"transition\"][c_s])\n\n # Generate emission\n\n # Note that we append the states as labels and observations as input\n y.append(c_s)\n x.append(self.sample_p(self.proba[\"emission\"][c_s]))\n\n else:\n eos = True\n\n # We get the state ID by offseting their idx by the length of observations\n ofs = len(self.obs)\n y = [i + ofs for i in y]\n return (x, y)", "def step(a=0):\n\n global simulator, recorder\n if simulator is None:\n print \"Program is not started\"\n return\n __record(pc(), step, a)\n try:\n simulator.step(a)\n except:\n simulation_error()\n exec_hooks(step)\n arrows()", "def step(self):\n\n pass", "def step(self):\r\n cmd = struct.pack('>B', 54)\r\n self.send(cmd)", "def eased_step_gen(track, t=None, srate=None):\n if t is None:\n t = time_gen(srate=srate)\n else:\n t = iter(t)\n t0 = next(t)\n t1 = t0\n old_value = None\n duration = 1\n ease_duration = 0\n for tple in track:\n if hasattr(tple,'__getitem__'):\n value = tple[0]\n if len(tple) > 1:\n duration = tple[1]\n if len(tple) > 2:\n ease_duration = tple[2]\n else:\n value = tple\n if old_value is None:\n old_value = value\n #print(value, duration, ease_duration)\n local_ease_duration = min(ease_duration, duration)\n while t0 + duration > t1:\n local_t = t1 - t0\n if local_t < local_ease_duration:\n mu = local_t / local_ease_duration\n yield old_value + mu * (value - old_value)\n else:\n yield value\n t1 = next(t)\n t0 = t1\n old_value = value", "def _gen(x_t, states, previous_direction=None):\n # Append the DWI ID of each sequence after the 3D coordinates.\n subject_ids = np.array([subject_id] * len(x_t), dtype=floatX)[:, None]\n\n if not self.use_previous_direction:\n x_t = np.c_[x_t, subject_ids]\n else:\n x_t = np.c_[x_t, subject_ids, previous_direction]\n\n results = f(x_t, *states)\n next_x_t = results[0]\n next_states = results[1:]\n return next_x_t, next_states", "def _generate_signal(self):\n x = np.arange(self.n, 
dtype='float')\n resample = np.random.rand(self.n) >= self.proba\n resample[0] = True # randomly initialize first sample\n x[resample] = np.random.randn(np.sum(resample))\n for i in x[~resample]:\n x[int(i)] = x[int(i)-1]\n return x", "def train(self, steps):\r\n for e in range(steps):\r\n # do something...\r\n pass\r\n return self.get_value_function()", "def step_forward(self):", "def generate_progression(start_number, step, length):\n progression = []\n for index in range(length + 1):\n progression.append(start_number + step * index)\n return [str(number) for number in progression]", "def step_linear_double(step):\n return step * 2", "def regular(step, start=0.):\n\n def output(low, high):\n newstart = math.ceil((low - start)/step) * step + start\n return numpy.arange(newstart, high, step, dtype=numpy.float)\n output.func_name = \"regular(%g, start=%g)\" % (step, start)\n return output", "def step_constant(step):\n return step", "def getSteps():", "def step(self,dt):\r\n\r\n # if float(dt) > 50.0:\r\n # raise Exception(\"dt is too big (>50 seconds)\")\r\n\r\n # send dynamics forward one s\r\n self.rv_eci = rk4_propagate(self.rv_eci,dt,self.earth)", "def step(self):\n raise NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def step(self):\n raise NotImplementedError()", "def _sim_step(self, u):\n raise NotImplementedError", "def generateSignal(self, input):\n raise NotImplementedError(\"generateSignals() not implemented!\")", "def do_step(self) -> None:", "def delay_times_linear(min_t, max_t, step_size):\n return np.flip(np.arange(max_t, min_t - step_size, -step_size))", "def start(self, step=None):\n\n if step is None:\n while True:\n next_event = self._pop_next_event()\n if next_event:\n self.current_time = next_event.datetime\n next_event.call()\n else:\n break\n else:\n # TODO: this is not right...\n while True:\n run_to = self.current_time + step\n while True:\n next_event = self._pop_next_event(run_to)\n if next_event:\n next_event.call()\n else:\n break\n print \"{time} Simulation Finished\".format(time=self.current_time)", "def horde_step(self, observation):", "def step(self, **kwargs):\n pass", "def transduce(self,inputs):\n self.start()\n return [self.step(inp) for inp in inputs]", "def steps(self, step_count):\n self.dir.value(0 if step_count > 0 else 1)\n for i in range(abs(step_count)):\n self.stp.value(1)\n sleep_us(self.step_time)\n self.stp.value(0)\n sleep_us(self.step_time)\n self.current_position += step_count", "def value_steps(self, steps):\n return self.initial_value * self.schedule(steps / self.nvalues)", "def simulate(self, n, dt=None):\n for _ in range(n):\n self.step(dt)", "def next_step(self):\n\n y_next = []\n y_next.append(0)\n for i in range(1, len(self.x) - 1):\n x = self.x[i]\n\n y = self.constant* (self.y_current[i + 1] + self.y_current[i - 1] - 2 * self.y_current[i])\\\n + 2 * self.y_current[i] - self.y_previous[i]\n\n y_next.append(y)\n\n y_next.append(0)\n\n self.y_previous = copy.copy(self.y_current)\n self.y_current = copy.copy(y_next)\n\n if self.timestep % 10000 is 0:\n self.timeframes[self.timestep] = copy.copy(self.y_current)\n\n self.timestep += 1", "def smoothed(sequence, step=1, start=0):\n next_index = start + 1\n last = len(sequence) \n new_sequence = []\n if not step:\n return sequence\n ratio_step = step + 1\n for item in sequence:\n new_sequence.append(item)\n if next_index < last:\n next_item = sequence[next_index]\n ratio = (item + next_item) / (step + 1)\n ratio = int(ratio)\n for x in range(step):\n value = 
(ratio * x) + item\n new_sequence.append(int(value))\n next_index = next_index + 1\n return new_sequence", "def step_based(t, eta_init, last_eta, d = 0.01, r = 50):\n return eta_init*d**np.floor((1+t)/r)", "def myTakeStep2(x):\n s = 0.5\n x += np.random.uniform(-s, s, np.shape(x))\n return x", "def step(self, dt):\n return Vector(self.P.x + dt*self.V.x, self.P.y + dt*self.V.y)", "def simulate(self, rng=None):\n if rng is None:\n rng = self.rng\n x0 = self.prior.rvs()\n # T steps is T-1 transitions\n steps_idx = rng.multinomial(1, self.step_probs, self.T-1).argmax(axis=1)\n steps_taken = np.take(self.step_sizes, steps_idx, axis=0)\n steps_taken = np.vstack([x0, steps_taken])\n return steps_taken.cumsum(axis=0)", "def step(self, mu, sig, z, dt):\n # Parse state \n x = mu[0]\n y = mu[1]\n theta = mu[2]\n v = mu[3]\n omega = mu[4]\n \n A = self.A_calc(x,y,theta,v,omega,dt)\n \n # Update\n mu_next_p = self.update_mu(mu,dt)\n sig_next_p = A @ sig @ A.transpose() + self.R\n K = sig_next_p @ self.C.T @ inv(self.C @ sig_next_p @ self.C.T + self.Q)\n mu_next = mu_next_p + K @ (z - self.C @ mu_next_p)\n sig_next = (np.identity(5) - K @ self.C) @ sig_next_p\n return mu_next, sig_next", "def single_run(steps_number):\n values = list()\n numerator = 0\n for i in trange(1, steps_number):\n\n numerator += generate_episode()\n\n values.append(numerator / i)\n\n return np.array(values)", "def _step_gen(self, step, stop):\n while stop is None or self.current < stop:\n yield self.next()\n self.skip_forward(step - 1)\n else:\n raise StopIteration", "def steps(self, length):\n steps = max(1, round(self.length / length, 0))\n return 1.0 / steps, int(steps)", "def __next_step(self, state) -> None:\n self.days += 1", "def simulate_system(self, state, input, time_step = 0.01):\n\n #set arm position to x\n self.arm.reset(q=state[0:3],dq=state[3:6])\n\n #apply the control signal\n self.arm.apply_torque(input,time_step)\n\n #get the next step from the arm \n xnext = np.append(np.copy(self.arm.q),np.copy(self.arm.dq))\n\n return xnext", "def simulate_system(self, state, input, time_step = 0.01):\n\n #set arm position to x\n self.arm.reset(q=state[0:3],dq=state[3:6])\n\n #apply the control signal\n self.arm.apply_torque(input,time_step)\n\n #get the next step from the arm \n xnext = np.append(np.copy(self.arm.q),np.copy(self.arm.dq))\n\n return xnext", "def backtrack_steps():\n\n # Initialize position and number of steps\n x = 0\n n_steps = 0\n\n # Walk until we get to positive 1\n while x < 1:\n x += 2 * np.random.randint(0, 2) - 1\n n_steps += 1\n\n return n_steps", "def step(self, move):", "def _setup_next_sequence(cls):\n return 0", "def sequence(self, n, show_progress=True):\n seq = np.ones(n)\n if show_progress:\n for i in tqdm(range(n), ascii=True, desc=f'Generating {n} bit sequence'):\n seq[i] = self.shift()\n else:\n for i in range(n):\n seq[i] = self.shift()\n return seq.astype(int)", "def step(self):\n self.schedule.step()", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def step(node,path):\n return (node[0]+path[0],node[1]+path[1])", "def take_step(self):\n choices_of_steps = [(0,1), (1,0), (0,-1), (-1,0)]\n return random.choices(choices_of_steps)[0]", "def config_step_sweep(self):\n self.write(\":SOUR:FREQ:MODE SWE;\"\n \":SOUR:SWE:GEN STEP;\"\n \":SOUR:SWE:MODE AUTO;\")", "def 
increment_steps(self):\n self.num_steps += 1", "def step(self, action):\n pass", "def step(self, action):\n pass", "def make_step(self):\n name = self.method.lower()\n if name==\"euler\": run_step = solvers.euler_step\n elif name==\"huen\": run_step = solvers.huen_step\n elif name==\"rk4\": run_step = solvers.rk4_step\n else: run_step = solvers.rk23_step\n\n def step_func(m):\n return run_step(self.dt,m,self.torque)\n self.step_func = step_func", "def calculate_signal(phases):\n signals = np.real(np.sum(np.exp(1j*phases), axis = 1))\n return signals", "def turn_steps(self, steps, delay_ms=1):\n if steps < 0:\n direction = -1\n else:\n direction = 1\n for _ in range(abs(int(steps))):\n self.current_step += direction\n element = STEP_ELEMENTS[self.current_step % N_STEP_ELEMENTS ]\n self.set_bits(element)\n time.sleep_ms(delay_ms)", "def step(self, dt):\n \n # get the current stage of the integration\n k_num = self.cstep\n\n for array in self.arrays:\n\n np = array.get_number_of_particles()\n\n # get the mapping for this array and this stage\n to_step = self.step_props[ array.name ][k_num]\n\n for prop in to_step:\n\n initial_prop = to_step[ prop ][0]\n step_prop = to_step[ prop ][1]\n\n initial_arr = array.get( initial_prop )\n step_arr = array.get( step_prop )\n\n updated_array = initial_arr + step_arr * dt\n\n # simply use periodicity for the positions\n if prop in ['x', 'y', 'z']:\n updated_array[numpy.where(updated_array < 0)[0]] += 1\n updated_array[numpy.where(updated_array > 1)[0]] -= 1\n\n array.set( **{prop:updated_array} )\n\n # Increment the step by 1\n self.cstep += 1", "def step(self):\n self.latent.step()", "def step(self, n, dlist):\n pass", "def generate_synth_data(n):", "def step(self, dt_usec):\n pod = self.sim.pod\n \n # Note: sensors always return a list of namedtuples. In this case, we always only return 1 'sample' per step. 
\n data = [self.sim.elapsed_time_usec, pod.position, pod.velocity, pod.acceleration, pod.he_height]\n for force in self.sim.pod.step_forces.values():\n data.extend([force.x, force.y, force.z])\n\n samples = [self.data(*data)] # List containing a single named tuple\n \n for step_listener in self.step_listeners:\n step_listener.step_callback(self, samples)", "def stair(self, steps):\n s_list = range(steps, 0, -1)\n return _BosonicPartitions(s_list)", "def stepify(times, values):\n new_times = np.empty((2*times.size - 1,))\n new_values = np.empty_like(new_times)\n new_times[::2] = times\n new_times[1::2] = times[1:]\n new_values[::2] = values\n new_values[1::2] = values[:-1]\n return new_times, new_values", "def Step(self, *args):\n return _gmat_py.Propagator_Step(self, *args)", "def make_time_steps(num_steps, interval):\n if num_steps % 2 != 0:\n raise ValueError('num_steps, %d, must be even'%num_steps)\n interval = int(interval)\n time_steps = np.zeros(num_steps, dtype=int)\n time_steps[::2] = interval*np.arange(num_steps/2)\n time_steps[1::2] = 1 + interval*np.arange(num_steps/2)\n return time_steps", "def get_step(self):\n direction = choice([1,-1])\n direction = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def step(self, state):\n a = super().step(state)\n return np.clip(a, -1, 1)", "def step(self, state):\n a = super().step(state)\n return np.clip(a, -1, 1)", "def step(self):\n return self._step", "def simulate_signal(self, when: datetime):\n self.data.simulate(when)\n self.when = when\n positions = self.signal()\n self.data.end_simulation()\n return positions", "def leap_frog_steps(self, steps, t_step):\n\n\t# first, compute the gravitational potential\n self.get_V()\n\t# then, give the psi field an initial kick - common in leap frog-type algorithms\n self.update_momentum(t_step/2)\n\t# then, just simulate a number of steps into the future\n for i in range(steps):\n self.update_position(t_step)\n self.get_V()\n self.update_momentum(t_step)\n print('step ', i)", "def step(self, inp):\n nState, output = self.getNextValues(self.currState, inp)\n self.currState = nState\n return output", "def step(self, action):" ]
[ "0.65995204", "0.63594466", "0.6249169", "0.61852676", "0.6093541", "0.6077157", "0.6051659", "0.6038631", "0.60360247", "0.59713215", "0.59579283", "0.59446794", "0.5920504", "0.59188426", "0.59027636", "0.589569", "0.58939064", "0.5887116", "0.5883278", "0.5864464", "0.5859242", "0.5841644", "0.58038443", "0.57770354", "0.57252085", "0.57057106", "0.5705457", "0.57014805", "0.5696663", "0.56860775", "0.56738496", "0.56721175", "0.5636304", "0.563389", "0.5629065", "0.5628108", "0.5613197", "0.56021374", "0.55927825", "0.5572549", "0.55702144", "0.556281", "0.556281", "0.556281", "0.5552692", "0.55511314", "0.5539589", "0.55243856", "0.55204296", "0.5515083", "0.55107224", "0.55099595", "0.5502601", "0.5487274", "0.54762906", "0.54718703", "0.54712415", "0.5466108", "0.5450406", "0.5445589", "0.5443398", "0.5435478", "0.54296327", "0.542679", "0.54188544", "0.5418067", "0.5417298", "0.5417298", "0.54137", "0.53978264", "0.5394757", "0.53911126", "0.5388134", "0.5386847", "0.5386847", "0.5381987", "0.53738374", "0.53702825", "0.5354844", "0.5350084", "0.5350084", "0.5345379", "0.53443205", "0.5343984", "0.53393126", "0.5338605", "0.53200096", "0.53179336", "0.5311619", "0.530916", "0.53080916", "0.5299493", "0.52987283", "0.5298397", "0.5287158", "0.5287158", "0.52818936", "0.52775234", "0.52765787", "0.52732074", "0.5263418" ]
0.0
-1
Settings for a random step sequence
def __init__(self, step_time, step_interval=None, n_step=None, ss=None):
    self.ss = ss
    self.n_step = n_step
    self.interval = step_interval
    self.step_time = step_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def take_step(self):\n choices_of_steps = [(0,1), (1,0), (0,-1), (-1,0)]\n return random.choices(choices_of_steps)[0]", "def _setup_next_sequence(cls):\n return 0", "def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5", "def setseq():\n\n if seqRnd[0]:\n r = getrndseq(seqRndWeight)\n if seqRnd[0] == 1:\n gbl.seqCount = r\n r = -1\n else:\n r = -1\n\n return ( r, seqRnd[1:] )", "def _setVals(self, step=0):\n self.step = step", "def evaluate_config(rnd: int):\n val_steps = 5 if rnd < 4 else 10\n return {\"val_steps\": val_steps}", "def init_random_state(self):\n self.current_state = self.rng.uniform(size=[1, self.num_spins])\n self.current_state = np.where(self.current_state < 0.5, -1.0, 1.0)", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def test_case_generate(self):\n\n # initialization\n state = np.random.choice(self.init_states)\n model = rm.randint(0, self.model_num - 1)\n duration = np.random.choice(self.step_values)\n temp = rm.randint(self.min_temp, self.max_temp)\n\n self.states = [[model, duration, temp]]\n self.time = duration\n\n while self.time < self.max_time:\n if state == \"inc_tmp\":\n change = np.random.choice(\n self.transitionName[0], p=self.transitionMatrix[0]\n ) # choose the next state\n if change == \"S1S1\": # stay in the same state\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n diff = (\n self.max_time - self.time\n ) # this is for ensuring the maximum duration is not exceeded\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n elif change == \"S1S2\": # change from increase to decrease\n temp = self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"dec_tmp\"\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n else:\n print(\"Error\")\n\n elif state == \"dec_tmp\":\n change = np.random.choice(\n self.transitionName[1], p=self.transitionMatrix[1]\n )\n if change == \"S2S1\":\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"inc_tmp\"\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n\n self.time += duration\n self.states.append([model, duration, 
temp])\n\n elif change == \"S2S2\":\n temp = self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n else:\n print(\"Error\")\n pass\n else:\n print(\"Error\")\n\n return self.states_to_dict()", "def next_state(self):\n \n self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])", "def set_rand_seed(self, idx):\n random.seed(self.base_seed + self.epoch + idx // 2)", "def step(self):\n\n self.agents[random.randint(self.get_agent_count())].step()\n self.steps += 1\n self.time += 1", "def setUp(self) -> None:\n self.random = np.random.RandomState(seed=42)", "def sequence_params(self):", "def initialize_random_number_generator(self,question_type):\n\t\tself.generator.seed(self.generate_index(self.magic, self.level, self.problem_id, question_type))", "def setRandom(self):\n pass # define each VarElement family", "def swait_setup_random_number(swait, **kw):\n swait.reset()\n swait.scan.put(\"Passive\")\n swait.calc.put(\"RNDM\")\n swait.scan.put(\".1 second\")\n swait.desc.put(\"uniform random numbers\")", "def nextPhase(self):\n\n if self.sensorType == SENSOR_TYPES[\"TEMP\"]:\n self.value = self.randGen.choice(TEMP_RANGE)\n else:\n self.value = self.randGen.randint(0, 100)", "def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)", "def seed():", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def set_first_machine_time_step(self, first_machine_time_step):", "def step_particles(particle,self):\n\n self.models[particle].step()\n\n self.states[particle] = (self.models[particle].agents2state()\n\n + np.random.normal(0, self.particle_std**2, \n\n size=self.states[particle].shape))\n\n self.models[particle].state2agents(self.states[particle])\n\n return self.models[particle], self.states[particle]", "def set_IStep(self, istep = [0], istep_sigma = [0], tstep = 5, tdur = 1e6, give_freq = True):\n if self.do_run:\n #for m in self.ic_steps:\n # m.destroy()\n # del m \n #del self.ic_steps\n \n #self.ic_steps = []\n \n istep = list(istep)\n neg = False\n \n for n in range(self.n_celltypes):\n \n if istep[n] < 0: \n neg = True\n istep[n] = abs(istep[n]) # make positive again\n \n if istep[n] != 0:\n if give_freq is True:\n a = np.array([istep[n]])\n iin = self.get_i(a, n)[0]\n if self.id == 0: print \"celltype: \", n, \" istep: \", istep[n], \"Hz => \", iin, \" nA\"\n istep[n] = iin \n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n np.random.seed(gid*30)\n \n if self.i_holdrs == []:\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n], istep[n]*istep_sigma[n], 1).clip(min=0)\n else: # same ihold for all cells!\n istep_r = istep[n]\n \n else: # ihold has been set!\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n]-self.i_holds[n], (istep[n]-self.i_holds[n])*istep_sigma[n], 1).clip(min=0) # delta now! 
put on top of hold!\n else: # same ihold for all cells!\n istep_r = istep[n]-self.i_holds[n] # delta now! put on top of hold!\n \n if neg:\n istep_r = -1*istep_r\n \n if istep[n] == 0:\n istep_r = -1*self.i_holdrs[n][i] \n \n #print 'is:' + str(istep_r) + 'was:' + str(self.i_holdrs[n][i])\n \n if istep_r != 0: \n # step current\n ic_step = h.IClamp(self.cells[n][i].soma(0.5))\n ic_step.delay = tstep/ms\n ic_step.dur = tdur/ms\n ic_step.amp = istep_r/nA\n self.ic_steps.append(ic_step)\n \n \n if self.id == 0: print \"set_IStep finished. istep: \", istep, \", istep_sigma: \", istep_sigma", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def setUp(self):\n # record the randomness used in case the test fails:\n self.rand_seed = int(time.time())\n sr.seed(self.rand_seed)\n print(\"seed for this test: \" + str(self.rand_seed))", "def totem_random():\n random_head()\n random_head()\n random_head()", "def simulate(self):\n self.round += 1", "def set_seed(self, seed: int):\n self.rsimulator.set_seed(seed)\n # Maybe call new game here?", "def NewRndSeed(ss):\n ss.RndSeed = int(datetime.now(timezone.utc).timestamp())", "def setUp(self):\n # record the randomness used in case the test fails:\n rand_seed = int(time.time())\n sr.seed(rand_seed)\n print(\"seed for this test: \" + str(rand_seed))", "def __random_movement(self):\n\t\tself.__steps += 1 \t\t# Increment after every frame\n\t\t# When __steps greater than threshold reverse the direction\n\t\t# and set threshold to a new random value\n\t\tif self.__steps >= self.__threshold_steps:\t\n\t\t\tif self.direction == 'RIGHT':\n\t\t\t\tself.move_left()\n\t\t\t\tself.direction = 'LEFT'\n\t\t\telse:\n\t\t\t\tself.move_right()\n\t\t\t\tself.direction = 'RIGHT'\n\t\t\tself.__threshold_steps = random.randint(25,50)\n\t\t\tself.__steps = 0\n\t\t# Confines the Donkeys movement to within the boundary \n\t\tself.__check_boundary()", "def rand(self):\n raise NotImplementedError", "def step(self, state):", "def random(self, n=1):\n # self.num_generated += n", "def random_params_gen(self) -> TransformParams:\n while True:\n do_hor_flip = self.horizontal_flip and (np.random.random() < 0.5)\n do_vert_flip = self.vertical_flip and (np.random.random() < 0.5)\n\n yield TransformParams(do_hor_flip=do_hor_flip,\n do_vert_flip=do_vert_flip)", "def SetRandomSeed(seed):\n global option\n option['random_seed'] = seed", "def __init__(self, random_state):\n self.random_state = random_state\n self.random_generator = RandomState(self.random_state)", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def start_new_chain(self, random_seed=None):\n if random_seed is not None:\n np.random.seed(random_seed)\n\n if self.n_burning > 0:\n parameters = self.sample_parameters(float(self.n_burning) / (self.thinning + 1))\n else:\n parameters = [self.samples_parameters[-1]]\n\n self.samples_parameters = []\n self.samples_parameters.append(parameters[-1])\n self.start_point_sampler = parameters[-1]", "def setup_method(cls):\n seed()", "def corun(self, step:int=0):\n if step > 0:\n _range = range(self.current_step, self.current_step + step + 1)\n else: # run forever\n _range = itertools.count(self.current_step)\n for step_num in _range:\n self.config_template = (yield self.step()) or self.config_template", "def randomize(self):\n \n spins = [np.random.random() > 0.5 for x in range(self.size)]\n self.spins_initial = bitarray.bitarray(spins)", "def step(self, steps):\n 
self._simulate(endStep=self.currentStep+steps)", "def _sample_steps(self):\n mixture_size = self.parameters['fixed_mixture_size']\n if not self.is_correlated_mixture and mixture_size is None:\n return self.get_steps('monte_carlo')\n else:\n return self.get_steps('metropolis')", "def _random_warmup(self, num_steps):\n new_frame = self.env.reset()\n reward = 0.0\n action = 0\n done = False\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n for i in range(num_steps):\n \n action = np.random.randint(self.num_actions)\n new_frame, reward, done, _ = self.env.step(action)\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n if done:\n new_frame = self.env.reset()\n self.memory.add_experience(0, 0.0, new_frame, 1, False)\n\n self.memory.add_experience(0, 0.0, new_frame, 1, True)", "def rand(self):\n self.state = (self.a * self.state + self.c)\n return self.state", "def randomize_position(self, w, steps = 3):\n \n #self.red.set_power(0)\n \n for k in range(steps):\n for idx,waveplate in enumerate(w):\n print '* Randomizing %s waveplate (step %d) ...'%(waveplate, k)\n self.rotator.quick_scan(np.random.uniform(low = -20000, high = 20000) ,getattr(self,'_'+waveplate+'_channel'))", "def update_random_state(self):\n self.random_state = RandomState()", "def setSeqRnd(ln):\n\n global seqRnd\n\n emsg = \"use [ON, OFF or TrackList ]\"\n if not ln:\n error(\"SeqRnd:\" + emsg)\n\n a=ln[0].upper()\n\n if a in (\"ON\", \"1\") and len(ln) == 1:\n seqRnd = [1]\n\n elif a in (\"OFF\", \"0\") and len(ln) == 1:\n seqRnd = [0]\n\n else:\n seqRnd=[2]\n for a in ln:\n a = a.upper()\n if not a in gbl.tnames:\n error(\"SeqRnd: Track '%s' does not exist, %s\" % (a, emsg))\n if a in seqRnd:\n error(\"SeqRnd: Duplicate track '%s' specified, %s\" % (a, emsg))\n seqRnd.append(a)\n\n if gbl.debug:\n print \"SeqRnd:\",\n if seqRnd[0] == 2:\n for a in seqRnd[1:]:\n print a,\n print\n elif seqRnd[0] == 1:\n print \"On\"\n else:\n print \"Off\"", "def set_seed(self,seed):\r\n if seed is None:\r\n warnings.warn(\r\n \"Initializing player with seed from Axelrod module random number generator. 
\"\r\n \"Results may not be seed reproducible.\")\r\n self._seed = _module_random.random_seed_int()\r\n else:\r\n self._seed = seed\r\n self._random = RandomGenerator(seed=self._seed)\r\n self.base._random = self._random\r\n self.trust._random = self._random\r\n self.conviction._random = self._random\r\n \r\n self.generator = torch.Generator()\r\n self.generator.manual_seed(int(seed))", "def step(self, step=None):\n pass", "def start_random_sequence(self) -> int:\n return random.randint(0, TWO_BYTES)", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def configure_step(self):\n pass", "def setRandDirection(self):\n phi = 2*math.pi*random.random()\n u = 2*random.random() - 1\n v = math.sqrt(1-u*u)*math.cos(phi)\n w = math.sqrt(1-u*u)*math.sin(phi)\n self.direction = (u,v,w)", "def set_states(self, states):\n if states is None:\n logging.getLogger('eval').warning(\n 'could not reproduce state, setting unreproducable random seed for all random states')\n self.randomstate.seed(np.random.randint(0, 1000000))\n if hasattr(self, 'random_mask_state'):\n self.random_mask_state.seed(np.random.randint(0, 100000))\n if hasattr(self, 'deformrandomstate'):\n self.deformrandomstate.seed(np.random.randint(0, 100000))\n else:\n if hasattr(self, 'random_mask_state') and 'random_mask_state' in states:\n self.random_mask_state.set_state(states['random_mask_state'])\n if hasattr(self, 'deformrandomstate') and 'deformrandomstate' in states:\n self.deformrandomstate.set_state(states['deformrandomstate'])\n self.randomstate.set_state(states['randomstate'])", "def configure_step(self):\n\n pass", "def setSeqRndWeight(ln):\n\n global seqRndWeight\n\n seqRndWeight = getweights(ln, \"SeqRndWeight\")", "def setRandomSensitivitySpeed(self) -> None:\n\n self.sensitivity = randint(20, 70)\n self.speed = randint(7, 12)", "def __dice_generator(self):\n self.current_dice = np.random.randint(1, 6 + 1)", "def seed(seed: int):\n # all sampling is actually happening in the move_cube module\n move_cube.seed(seed)", "def SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def _rgbSequenceInit(self):\n ## send all of this to sequence acq\n if not self.nbFrames:\n self.nbFrames = int(self.duration/self.cycleTime)+1 ## Determine number of frames. 
(+1) because int round at the lower int\n self.ledSeq = [0]*self.rgbLedRatio[0]+[1]*self.rgbLedRatio[1]+[2]*self.rgbLedRatio[2] #Sequence of LED lighting in function of the ratio\n #RED = 0\n #GREEN = 1\n #BLUE = 2\n print('LED sequence : ', self.ledSeq)\n self.ledList = self.ledSeq*(int(self.nbFrames/(len(self.ledSeq)))+1) ## schedule LED lighting\n #NB : no return needed because each ledList and nbFrames are instance attribute", "def reinitialize(self, random_state):\n pass", "def _set_seed(self) -> None:\r\n random.seed(self.seed)\r\n np.random.seed(self.seed)", "def trial_config(self, prev_config, cov_config=1e-2):\r\n return prev_config + np.random.normal(0, cov_config, len(prev_config))", "def randomize_value(self) -> None:", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def gen_random_walk(self,n_step=100):\n # Warning about the small number of steps\n if n_step < 30:\n print(\"WARNING! The number of steps is small. It may not generate a good stochastic process sequence!\")\n \n w = np.ones(n_step)*self.x0\n \n for i in range(1,n_step):\n # Sampling from the Normal distribution with probability 1/2\n yi = np.random.choice([1,-1])\n # Weiner process\n w[i] = w[i-1]+(yi/np.sqrt(n_step))\n \n return w", "def set_seed(self, seed=None):\n super().set_seed(seed=seed)\n for t in self.policy_list:\n t.set_seed(self._random.random_seed_int())", "def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True", "def reset(self):\n \n self.steps = 0\n if self.episode == 0:\n self.ins = random.uniform(self.mins.values[:4],self.maxes.values[:4])\n #get the corresponding outputs:\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n self.starts = np.append(self.ins, outs)\n\n else:\n self.starts = self.state[:7] #previous episode's end state\n\n #get goals from random inputs:\n viable = False\n while viable == False:\n self.ins = random.uniform((self.mins.values[:4]+(self.mins.values[:4]*self.minmaxbuffer)),self.maxes.values[:4]-(self.maxes.values[:4]*self.minmaxbuffer))\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n \n # Check if viable:\n viable = self.test_viable(outs)\n\n self.goals = outs\n\n # These are your current inputs:\n self.ins = self.starts[:4]\n # State carries the starting points and the goals.\n self.state = np.append(self.starts,self.goals)\n\n #Track episodes and total reward.\n self.episode += 1\n self.tot_rew = 0\n\n return (self.state)", "def step(self, **kwargs):\n pass", "def reset(self, setup=False):\n self._done = False\n self._nbSteps = 0\n\n x = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n x = random.randint(0, self._width - 1)\n elif (self.startPosX == 'random' and not setup):\n x = self._initState[0]\n elif self.startPosX == 'center':\n x = self._width - 1\n else:\n x = int(self.startPosX)\n\n y = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n y = random.randint(0, self._height - 1)\n elif (self.startPosY == 'random' and not setup):\n y = self._initState[1]\n elif self.startPosX == 'center':\n y = self._height - 1\n else:\n y = int(self.startPosX)\n\n self._currentPos = (x, y)\n 
self._trajectory = [(x, y)]\n\n return (x, y)", "def test_random_movement(\n size: Union[int, tuple], num_berries: int, delay_seconds: int, number_steps: int\n) -> None:\n game = Game(\n size,\n [0, 0],\n -1,\n 5,\n -5,\n 10,\n num_berries,\n berry_movement_probabilities=[0.5] * num_berries,\n )\n print(f\"Starting board:\\n{game.get_board()}\")\n done = False\n i = 1\n while not done and i < number_steps:\n print(f\"Action {i}\")\n time.sleep(delay_seconds)\n _, reward, done = game.step(random.choice(MOVEMENTS))\n print(f\"Board:\\n{game.get_board()}\")\n print(f\"Reward: {reward}\")\n i += 1", "def setup_steps(self):\n step1 = ground_step.Ground(5745, 495, 40, 44)\n step2 = ground_step.Ground(5788, 452, 40, 44)\n step3 = ground_step.Ground(5831, 409, 40, 44)\n step4 = ground_step.Ground(5874, 366, 40, 176)\n\n step5 = ground_step.Ground(6001, 366, 40, 176)\n step6 = ground_step.Ground(6044, 408, 40, 40)\n step7 = ground_step.Ground(6087, 452, 40, 40)\n step8 = ground_step.Ground(6130, 495, 40, 40)\n\n step9 = ground_step.Ground(6345, 495, 40, 40)\n step10 = ground_step.Ground(6388, 452, 40, 40)\n step11 = ground_step.Ground(6431, 409, 40, 40)\n step12 = ground_step.Ground(6474, 366, 40, 40)\n step13 = ground_step.Ground(6517, 366, 40, 176)\n\n step14 = ground_step.Ground(6644, 366, 40, 176)\n step15 = ground_step.Ground(6687, 408, 40, 40)\n step16 = ground_step.Ground(6728, 452, 40, 40)\n step17 = ground_step.Ground(6771, 495, 40, 40)\n\n step18 = ground_step.Ground(7760, 495, 40, 40)\n step19 = ground_step.Ground(7803, 452, 40, 40)\n step20 = ground_step.Ground(7845, 409, 40, 40)\n step21 = ground_step.Ground(7888, 366, 40, 40)\n step22 = ground_step.Ground(7931, 323, 40, 40)\n step23 = ground_step.Ground(7974, 280, 40, 40)\n step24 = ground_step.Ground(8017, 237, 40, 40)\n step25 = ground_step.Ground(8060, 194, 40, 40)\n step26 = ground_step.Ground(8103, 194, 40, 360)\n\n step27 = ground_step.Ground(8488, 495, 40, 40)\n\n self.step_group = pygame.sprite.Group(step1, step2,\n step3, step4,\n step5, step6,\n step7, step8,\n step9, step10,\n step11, step12,\n step13, step14,\n step15, step16,\n step17, step18,\n step19, step20,\n step21, step22,\n step23, step24,\n step25, step26,\n step27)", "def __init__(self, env, random_seed=None):\n self.env = env \n self.RandomState = np.random.RandomState(random_seed)", "def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"", "def Gen_RandLine(length, step_max, dims=2):\n \n lineData = np.empty((dims, length))\n lineData[:, 0] = np.random.rand(dims)\n for index in range(1, length):\n step = ((np.random.rand(dims) - 0.5)*step_max)\n lineData[:, index] = lineData[:, index - 1] + step\n return lineData", "def setUp(self):\n self.t = True\n self.f = False\n self.value = 25", "def __init__(self):\n \n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToPlay'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def setUp(self):\n shape = RNG.integers(5, 50)\n periods = self.periods = RNG.normal() * 3\n freq = periods / shape\n amp = RNG.normal()\n 
offset = RNG.normal()\n phase = (RNG.normal() - 1 / 2) * 5 / 3 * np.pi\n p_gt = self.p_gt = (amp, freq, phase, offset)\n x = self.x = np.arange(shape)\n self.data = sine(x, *p_gt)", "def set_seed(self, seed: int):\n self.__sim.seed(seed)", "def __init__(self, allow_step_back=False):\n self.allow_step_back = allow_step_back\n self.np_random = np.random.RandomState()\n \"\"\" No big/small blind\n # Some configarations of the game\n # These arguments are fixed in Leduc Hold'em Game\n # Raise amount and allowed times\n self.raise_amount = 2\n self.allowed_raise_num = 2\n self.num_players = 2\n \"\"\"\n # Some configarations of the game\n # These arguments can be specified for creating new games\n\n # Small blind and big blind\n self.small_blind = 1\n self.big_blind = 2 * self.small_blind\n\n # Raise amount and allowed times\n self.raise_amount = self.big_blind\n self.allowed_raise_num = 2\n\n self.num_players = 2", "def test_init(self):\n global_step = tf.get_variable(\"global_step\", [], tf.int32,\\\n initializer=tf.constant_initializer(0, dtype=tf.int32),\n trainable=False)\n lstm_pi = LSTMPolicy((80,80,3), 4,global_step)", "def __init__(self, rate):\n super(RandomWander, self).__init__()\n self.iteration = 0\n self.rate = rate\n self.speed = 0\n self.heading = 0", "def step(self): \n self.reset_parameters()\n\n if np.random.uniform(0, 1) < self.model.churn_prob: self.exit_triggered = True \n if self.exit_triggered:\n self.exit()\n else:\n self.register_deposit(self.deposit_intent)\n self.register_contribution(self.contribution_intent)\n self.register_sponsorship(self.sponsor_intent)\n self.register_euro_exchange(self.euro_exchange_intent)\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)", "def rand(self):\n return self.State.rand()", "def test_init():\n rng = NonRandom()\n seed = 5\n rng.setSeed(seed)\n wheel = Wheel(rng)\n assert len(wheel.bins) == 38\n assert wheel.rng.value == seed\n assert wheel.rng.choice(range(0, 38)) == range(\n 0, 38)[wheel.rng.value] # == seed", "def generator(self, random, args):\r\n raise NotImplementedError", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def play_random(env, steps):\n try:\n done = True\n progress = tqdm(range(steps))\n for _ in progress:\n if done:\n _ = env.reset()\n action = env.action_space.sample()\n _, reward, done, info = env.step(action)\n progress.set_postfix(reward=reward, info=info)\n env.render()\n except KeyboardInterrupt:\n pass\n # close the environment\n env.close()", "def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)", "def __init__(self, initial_value, n_values, schedule):\n self.step = 0.\n self.initial_value = initial_value\n self.nvalues = n_values\n self.schedule = SCHEDULES[schedule]", "def get_next_sample(self):" ]
[ "0.6600735", "0.6600735", "0.6426488", "0.63580966", "0.63181347", "0.6233752", "0.6195526", "0.61665463", "0.6133349", "0.6077554", "0.60269153", "0.5976023", "0.595699", "0.59379584", "0.59343755", "0.59263146", "0.5924577", "0.59218735", "0.5883561", "0.5867385", "0.58269936", "0.5821847", "0.5802779", "0.5774282", "0.57555526", "0.57509786", "0.574638", "0.57450294", "0.5740118", "0.57344127", "0.5717241", "0.57172257", "0.56846654", "0.56758934", "0.567149", "0.56553286", "0.56395864", "0.5639068", "0.5637476", "0.56370026", "0.563278", "0.56323016", "0.5628614", "0.5627348", "0.56260455", "0.5609896", "0.5604609", "0.56016976", "0.5601342", "0.55982894", "0.5597746", "0.55915093", "0.55913436", "0.55911785", "0.5580702", "0.557787", "0.55691504", "0.55665696", "0.55651593", "0.55644536", "0.55555946", "0.5553894", "0.55499774", "0.5549558", "0.55430317", "0.55393404", "0.5526295", "0.5524297", "0.5523442", "0.5521429", "0.55153036", "0.5514629", "0.551019", "0.5507653", "0.5503758", "0.54958874", "0.54932344", "0.5486296", "0.54810774", "0.54761463", "0.5475091", "0.54738265", "0.54711276", "0.5468878", "0.5465642", "0.54418707", "0.5437692", "0.5435852", "0.54288685", "0.54225165", "0.54206306", "0.5419599", "0.54185426", "0.54017556", "0.54011166", "0.53993434", "0.53983533", "0.5395225", "0.53945327", "0.5394164", "0.5389844" ]
0.0
-1
Generate a random sequence
def out(self, t: any, dim=(None, None)) -> any:
    lB = self.interval[0]  # Lower Boundary
    uB = self.interval[1]  # Upper Boundary
    # Initialize random step vector each sampling period using comprehensive list.
    step_vector = [round(uniform(lB, uB), 1) for _ in range(self.n_step)]
    u = np.zeros(shape=dim)  # Initialize step control input array u.
    j = 0
    for i in range(len(t)):  # Excluding the last point
        if t[i] % self.step_time == 0 and t[i] != 0 and j+1 != len(step_vector) and i != len(t)-1:  # No last step
            j += 1
        if self.ss is not None and j == 0:
            u[i, :] = self.ss
        else:
            u[i, :] = step_vector[j]
    return u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_random_sequence():\n\n seq = []\n [seq.append(np.random.choice(cs.DNA_BASES)) for _ in range(cs.LENGTH)]\n\n return seq", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def start_random_sequence(self) -> int:\n return random.randint(0, TWO_BYTES)", "def random(self, seq=None):\n if seq is None:\n seq = self.seq\n seq_list = list(seq)\n random.shuffle(seq_list)\n return \"\".join(seq_list)", "def _generate_random_number_for_each_sequence(total, sequence_number):\r\n current_total = 0\r\n r = []\r\n for n in range(sequence_number-1, 0, -1):\r\n current = random.randint(1, total - current_total - n)\r\n current_total += current\r\n r.append(current)\r\n r.append(total - sum(r))\r\n random.shuffle(r)\r\n\r\n return r", "def choice(seq):\r\n i = int(random() * len(seq))\r\n return seq[i]", "def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)", "def generate() -> int:\n return randint(0, 1000000000)", "def get_random_sequence(length):\n sequence = ''\n for i in range(length):\n random_letter = format(random.randrange(9), 'x')\n sequence = '{}{}'.format(sequence, random_letter)\n return sequence", "def generate_sequence(self, n=100, initial_state=None):\n\n if initial_state is None:\n if self.pad:\n sequence = [START_OF_SEQ] * self.order\n else:\n sequence = list(random.choice(self.records.keys()))\n else:\n sequence = initial_state[:]\n\n for i in range(n):\n current_state = tuple(sequence[-self.order:])\n next_token = self.sample(current_state)\n sequence.append(next_token)\n\n if next_token == END_OF_SEQ:\n return sequence\n\n return sequence", "def random_sample(seq):\r\n if len(seq) = 0:\r\n return None\r\n return sample(seq, randint(1, len(seq)/2))", "def rngnext():\n out = []\n # random\n state = random.getstate()\n out.append(f\"r={random.random():0.4f}\")\n random.setstate(state)\n\n # numpy\n state = np.random.get_state()\n out.append(f\"n={np.random.random():0.4f}\")\n np.random.set_state(state)\n\n # torch\n state = torch.random.get_rng_state()\n out.append(f\"t={torch.rand(1)[0]:0.4f}\")\n torch.random.set_rng_state(state)\n\n # cuda\n if torch.cuda.is_available():\n state = torch.cuda.get_rng_state()\n # note there is no function for generating a random in cuda but this may work?\n out.append(f\"c={state.float().std()%1:0.4f} {torch.backends.cudnn.deterministic}\")\n\n return out", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def generate_raiz():\n\treturn os.urandom(12)", "def random():\r\n return R.NextDouble()", "def rand(self):\n raise NotImplementedError", "def random(self, n=1):\n # self.num_generated += n", "def generate_sequence(n):\n\n sequence = []\n\n # generate sequence\n while n != 1:\n sequence.append(n)\n n = next_integer(n)\n\n # append 1 to sequence since all sequences assumed to end in 1\n sequence.append(1)\n\n return sequence", "def random_seq(length, nucleic_acid='DNA'):\n \n if nucleic_acid == 'DNA':\n alphabet = ('A','C','T','G')\n elif nucleic_acid == 'RNA':\n alphabet = ('A','C','U','G')\n\n so_far = ''\n for i in range(length):\n so_far += random.sample(alphabet, 1)[0]\n return so_far", "def setseq():\n\n if seqRnd[0]:\n r = getrndseq(seqRndWeight)\n if seqRnd[0] == 1:\n gbl.seqCount = r\n r = -1\n else:\n r = -1\n\n return ( r, seqRnd[1:] )", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def gen_seq(self,ntrials=20,pm_trial_position=None):\n # insert ranomly positioned pm trials\n if 
type(pm_trial_position)==type(None):\n ntrials -= 1+self.num_pm_trials\n pm_trial_position = np.random.randint(self.min_start_trials,ntrials,self.num_pm_trials) \n else:\n ntrials -= 1+len(pm_trial_position)\n pm_trial_position = pm_trial_position\n # generate og stim\n seq = np.random.randint(0,self.ntokens_og,ntrials)\n X = np.insert(seq,[0,*pm_trial_position],self.pm_token)\n # form Y \n Xroll = np.roll(X,self.nback)\n Y = (X == Xroll).astype(int) # nback trials\n Y[X==self.pm_token]=2 # pm trials\n return X,Y", "def generate_sequence(seq_len, query_distribution):\n\n np.random.seed()\n\n #normailze the frequencies to form a distribution\n query_ids, distribution = zip(*query_distribution)\n distribution /= sum(np.array(distribution))\n\n return np.random.choice(query_ids, size=seq_len,\n replace=True, p=distribution)", "def rseq(start=0.0, stop=1.0, N=10, randomness=0.5):\n\n return (randomness * sort(start + (stop - start) * rand(N))\n + (1 - randomness) * frange(start, stop, npts=N))", "def random_values():\n while True:\n yield random()", "def generate_seq(self):\n\n # Variable initialization\n eos = False\n c_s = 99\n x = []\n y = []\n\n while not eos:\n\n # Start of sequence\n if c_s == 99:\n # Sample from initial\n c_s = self.sample_p(self.proba[\"initial\"])\n\n # Consecutive iterations\n\n # We generate until we get length of self length\n elif len(x) < self.length:\n # Sample from transition of last state\n c_s = self.sample_p(self.proba[\"transition\"][c_s])\n\n # Generate emission\n\n # Note that we append the states as labels and observations as input\n y.append(c_s)\n x.append(self.sample_p(self.proba[\"emission\"][c_s]))\n\n else:\n eos = True\n\n # We get the state ID by offseting their idx by the length of observations\n ofs = len(self.obs)\n y = [i + ofs for i in y]\n return (x, y)", "def generate_numbers():\n\n return random.sample(range(100), 10)", "def generate(self) -> List[str]:\n\n self._reset()\n\n res = self._get_interactions(\n random.randint(self._min_seq_len, self._max_seq_len))\n\n self._add_guarded_first_named_alloc(res)\n\n if random.randint(0, 1):\n # Add some noise between source and destination\n # Is this helpful? 
Why?\n noise = self._get_interactions(\n random.randint(self._min_intervening_len,\n self._max_intervening_len))\n res.extend(noise)\n\n res.append(self._get_second_named_alloc())\n\n return stringify_sequence(res)", "def random(n: int) -> bytes:\n return os.urandom(n)", "def rand(self):\n return self.State.rand()", "def genNum(num, len):\n seed = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n for i in range(num):\n print ''.join(random.sample(seed, len))", "def random(self):\r\n return random.randint(1, 4)", "def totem_random():\n random_head()\n random_head()\n random_head()", "def code_generator():\n digits = [str(num) for num in range(10)]\n random.shuffle(digits)\n return digits[:3]", "def random():\n np.random.seed(1939)", "def rand_elem(seq, n=None):\n return map(random.choice, repeat(seq, n) if n is not None else repeat(seq))", "def randomSub(seed: float):\n crc = str(string.ascii_letters + string.digits)\n random.seed(seed)\n n = random.randint(10,30)\n return \"\".join(random.sample(crc, n))", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def getRandomSequence(seqLength):\n nucleotides = (\"A\", \"C\", \"G\", \"T\")\n seq = \"\"\n for i in range(seqLength):\n seq += random.choice(nucleotides)\n \n dictionary = {\"description\": \"Random sequence | \" + str(seqLength) + \"bp\", \"type\": \"dna\", \"data\" : seq}\n \n return dictionary", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def generate_code(self):\n seeds = \"1234567890\"\n random_str = []\n for i in range(4):\n random_str.append(choice(seeds))\n\n return \"\".join(random_str)", "def generate_seed():\n global seed\n seed = []\n\n for char_id in range(0, len(printable)):\n while True:\n char_sequence = [printable[randint(0, len(printable)-1)], printable[randint(0, len(printable)-1)]]\n if char_sequence not in seed:\n break\n seed.append(char_sequence)", "def generate_random_rot():\n from pyso3.quaternion import quat2rot\n import numpy as np\n q = np.random.randn(4)\n q = q / np.linalg.norm(q)\n return quat2rot(q)", "def generate_one_sample(dimension, sequence_length, repeat_times):\n # produce random sequence\n sequence = np.random.binomial(\n 1, 0.5, (sequence_length, dimension - 1)).astype(np.uint8)\n\n # allocate space for input sequence and output sequence\n input_sequence = np.zeros(\n (sequence_length + 1 + sequence_length * repeat_times, # + 1\n dimension),\n dtype=np.bool)\n output_sequence = np.zeros(\n (sequence_length + 1 + sequence_length * repeat_times, # + 1\n dimension),\n dtype=np.bool)\n\n # set value of input sequence\n input_sequence[:sequence_length, :-1] = sequence\n # input_sequence[sequence_length, -1] = repeat_times\n input_sequence[sequence_length, -1] = 1\n\n # set value of output sequence ## sequence_length + 1\n output_sequence[sequence_length+1:, :-1] = \\\n np.tile(sequence, (repeat_times, 1))\n # \"1\": A special flag which indicate the begin of the output\n # output_sequence[sequence_length, -1] = 1\n\n # return the sample\n return input_sequence, output_sequence", "def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]", "def get_random(self,num):\n return ''.join(sample('abcdefghijklmnopqrstuvwxyz1234567890!',8))", "def random_number_generator(arg1, arg2):\n return 42", "def random(cls):\n return cls(os.urandom(32))", "def random_number(length=6):\n return randint(10**(length-1), 
(10**(length)-1))", "def random():\n return constant(1)", "def generate_random_numbers(self):\r\n #random.seed(seed=self.seed)\r\n #err = random.random((3,1))\r\n #f = open('test_res', 'a')\r\n #f.write('probability - %s' %self.seed)\r\n #f.write(str(list(err[:3,:])))\r\n #f.write('\\n')\r\n #f.close()\r\n\r\n dist = RandomDistribution(self.seed)\r\n rand_numbers = dist.return_random_variables(self.num_agents)\r\n return rand_numbers", "def i_rand_a():\n return i_random() % 95 + 32", "def generate_rng(nrngs, startseed=None):\n start_rng = np.random.RandomState(startseed)\n for i in range(nrngs):\n yield np.random.RandomState(start_rng.randint(2**32))", "def rand_gen(below, baseline):\n\treturn secrets.randbelow(below)/ baseline", "def randomNumberGenerator(self):\n #infinite loop of magical random numbers\n print(\"Making random numbers\")\n while not thread_stop_event.isSet():\n number = random.randint(10000,99999)\n print(number)\n socketio.emit('newQrCode', str(number), namespace='/test')\n time.sleep(5)", "def random():\n np.random.seed(0)", "def _generate_seq(sn):\n a, b = 0, 1\n for i in range(sn):\n yield str(a) + ' '\n a, b = b, a+b", "def get_random_sequence(genome):\n \n chr_list = get_chromosome_length(genome)\n \n random_seq = {}\n chr = random.sample(chr_list.keys(),1) #select chromosome\n slen = random.randint(300,1000) #select sequence length\n if chr_list[chr[0]] - slen > 0:\n spos = random.randint(1,chr_list[chr[0]] - slen) #select start position\n \n seq = get_fragment(genome, chr[0], slen, spos)\n if seq.count(\"N\") > 0.1 * slen:\n seq = get_random_sequence(genome)\n else:\n seq = get_random_sequence(genome)\n \n return seq", "def generator(self, args, gen):\n import random\n\n if args.seed:\n random.seed(args.seed)\n seqs = [s for s in gen]\n sample_indices = random.sample(range(len(seqs)), min(len(seqs), args.number))\n for i in sample_indices:\n yield seqs[i]", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def genRandom(self, bits):\n _rand = 0\n _bytes = bits // 8 + 8\n\n while(len(bin(_rand))-2 < bits):\n\n try:\n _rand = int.from_bytes(random_function(_bytes), byteorder='big')\n except:\n _rand = int(random_function(_bytes).encode('hex'), 16)\n\n return _rand", "def rand(self): # Method doctring\n\n self._last_rand = xorshift32(self._last_rand, self.triple)\n return self._last_rand", "def generateRandomString():\n return ''.join(b64encode(urandom(32)).decode('utf-8'))", "def makeChrom(length):\n output = []\n for i in range(length):\n output.append(randrange(14))\n return output", "def getRandom(self) -> int:\n count = len(self.arr)\n return self.arr[randint(0, count-1)]", "def random_body():\n extra = random.randint(1, 9)\n sequence = (hex(rand32()) for _ in range(extra))\n return hex(id_) + '\\n' + '\\n'.join(sequence)", "def generator(self, random, args):\r\n raise NotImplementedError", "def random_keys(self):\n while True:\n yield self.generator.str()", "def i_random():\n global randrsl, randcnt\n\n r = randrsl[randcnt]\n randcnt += 1\n if (randcnt > 255):\n isaac_()\n randcnt = 0\n\n return r", "def gensalt():\n return hexlify(os.urandom(24)).decode()", "def genKey(length=32):\r\n return os.urandom(length)", "def sample(self):\n seq = []\n for i in range(self._pwm.shape[1]):\n p = numpy.array(self._pwm[:, i], dtype=numpy.float64)\n p /= p.sum()\n seq.extend(numpy.random.choice(self.alphabet, p=p))\n return \"\".join(seq)", "def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def genRandString(dl = 10):\n ret 
= ''\n for i in range(dl) :\n ret += random.choice(string.ascii_letters + string.digits)\n return ret", "def gen_rand(l):\n w = int(l / 2)\n\n min = (1 << (w - 1)) | 1\n max = (1 << w) - 1\n\n n = random.randrange(min, max) | 1\n\n return n", "def getRandom(self) -> int:\n steps = random.randint(0, self.len-1) # 随机抽取一个\n temp = self.head\n for i in range(steps):\n temp=temp.next\n return temp.val", "def random_num(self):\r\n self.generate_n1()\r\n self.generate_n2()\r\n self.generate_n3()\r\n self.generate_n4()\r\n random_number = str(self.n_1decimal)+str(self.n_2decimal)+str(self.n_3decimal)+str(self.n_4decimal)\r\n print int(random_number)", "def getRandomAngle():\r\n\treturn random.random() * math.pi * 2", "def seed_random(max_integer):\n return random.randrange(0,max_integer);", "def generator(self, random, args):\n\t\traise NotImplementedError", "def rand_ident():\n return random.randrange(MAX_IDENT)", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def _random_issn():\n first = randint(1000, 9999)\n second = randint(100, 999)\n return str(first) + \"-\" + str(second) + str(_select_from([1, 2, 3, 4, 5, 6, 7, 8, 9, \"X\"]))", "def generate(self):\n node = self.generate_random()\n\n while True:\n yield node.state[-1]\n if len(node.next_states) != 0:\n node = node.get_next_state()\n if node == None:\n node = self.generate_random()\n while len(node.next_states) == 0:\n node = self.generate_random()\n else:\n node = self.generate_random()\n while len(node.next_states) == 0:\n node = self.generate_random()", "def generateSequenceBias(self, bias):\n\n if bias < 0 or bias > 1:\n raise ValueError(\"Bias must be a value between 0 and 1.\")\n else:\n for i in range(self.length):\n self.sequence.append(0 if random.random() < bias else 1)\n self.biasSeq = 1\n self.bias = bias", "def random_generator(nurses_number: int = 10):\n\n # For each possible shift of all the nurses, is generated randomly a value to define as allocated or not\n state = ''\n\n # The range goes from 0 to 21*nurses_number. 
This happens because we every time have 21 shifts to n nurses\n for i in range(0, 21 * nurses_number):\n state = state + str(randrange(0, 2))\n\n # Return the new state generated\n return state", "def _generate_string_seq():\n input_word_num = random.randint(1, config.MAX_INPUT_WORD_NUMBER)\n return ' '.join(resources.get_random_words(input_word_num))", "def __generate_random_gene_sequence(self):\n genes = []\n for j in range(self.chromosome_size):\n genes.append(random.choice(self.gene_pool))\n\n return genes", "def my_random(a):\r\n import random\r\n r = random.randint(0, 100)\r\n return a + r", "def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)", "def gen_random(\n l: list,\n n: int,\n seed: int = None\n ) -> str:\n\n # Initialisations\n s = \"\"\n\n # Loop for the desired length of the string\n for i in range(0, n):\n\n if seed is not None:\n\n numpy.random.seed(seed + i)\n\n # Append the next random character\n s += numpy.random.choice(l)\n\n return s", "def generate_custom_sequence(program, pass_space=DEFAULT_GENE_POOL,\n debug=False):\n global print_out\n print_out = debug\n return simulate_generations(pass_space, program)", "def randomSeq(n, a, b):\n \n return [\n Complex(a + np.random.random()*(b-a), a + np.random.random()*(b-a))\n for _ in range(n)\n ]", "def random_sequence(amount=100, start=0, stop=0, reverse=False):\n \n sequence = []\n if start == stop:\n for i in range(amount):\n sequence.append(stop)\n elif start < stop and not reverse: \n for i in range(amount):\n number = random.randrange(start, stop)\n sequence.append(number)\n sequence.sort()\n elif start < stop and reverse: \n for i in range(amount):\n number = random.randrange(start, stop)\n sequence.append(number)\n sequence.sort(reverse=True)\n\n elif start > stop and not reverse: \n for i in range(amount):\n number = random.randrange(stop, start)\n sequence.append(number)\n sequence.sort()\n elif start > stop and reverse: \n for i in range(amount):\n number = random.randrange(stop, start)\n sequence.append(number)\n sequence.sort(reverse=True)\n return sequence", "def getRandom(self) -> int:\n return random.choice(tuple(self.l))", "def computer_generate(self):\n return choice[random.randrange(3)]", "def rand(lo=0, hi=1):\n global Seed\n Seed = (16807 * Seed) % 2147483647\n return lo + (hi - lo) * Seed / 2147483647", "def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in range(0,size,1):\n gene+=random.choice(self.instructions)\n return gene", "def shotgenerator():\n return random.randint(0, 9), random.randint(0, 9)" ]
[ "0.7697955", "0.7600561", "0.7551118", "0.75248724", "0.7125383", "0.7092031", "0.69861037", "0.69277364", "0.6886229", "0.6868949", "0.6833825", "0.6803206", "0.67367864", "0.67331696", "0.6730156", "0.6715812", "0.6699407", "0.66590357", "0.66281706", "0.66149676", "0.65995187", "0.65995187", "0.6595084", "0.65926003", "0.6589287", "0.6526456", "0.6507666", "0.6493813", "0.64916354", "0.64704496", "0.64284474", "0.6412195", "0.6389472", "0.6383682", "0.6376732", "0.63756526", "0.63748145", "0.6361774", "0.6359661", "0.6349557", "0.6347794", "0.6343306", "0.63256556", "0.6319575", "0.6316664", "0.6297366", "0.62889344", "0.6288031", "0.62869245", "0.6272077", "0.6254106", "0.62495464", "0.62472504", "0.62420136", "0.6236023", "0.62344503", "0.62164193", "0.6213061", "0.62045175", "0.61961657", "0.6188359", "0.6182299", "0.6170367", "0.61689585", "0.61681896", "0.6154074", "0.6146085", "0.6143121", "0.6138793", "0.6134056", "0.6122844", "0.61178416", "0.6107172", "0.61066616", "0.61066616", "0.6104341", "0.61021745", "0.60951346", "0.6094966", "0.609262", "0.6077214", "0.60750395", "0.6070348", "0.60690355", "0.6063439", "0.6060368", "0.60598695", "0.6059355", "0.6054469", "0.60532415", "0.60517377", "0.6040376", "0.6034677", "0.60313475", "0.60302454", "0.60285693", "0.60275376", "0.60235995", "0.60178214", "0.6015", "0.6013653" ]
0.0
-1
Settings for a Gauss step sequence
def __init__(self, step_time, mu=None, sigma=None, n_step=None, ss=None): self.ss = ss self.n_step = n_step self.mu = mu self.sigma = sigma self.step_time = step_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setVals(self, step=0):\n self.step = step", "def gauss_seidel(self):\n for i in range(1,self.size[0]-1):\n for j in range(1,self.size[1]-1):\n for k in range(1,self.size[2]-1):\n self.A[(i,j,k)] = ((1/6)*(self.A[(i+1,j,k)] + self.A[(i-1,j,k)] + self.A[(i,j+1,k)] + self.A[(i,j-1,k)] + self.A[(i,j,k+1)] + self.A[(i,j,k-1)] + self.J[(i,j,k)]) - self.A[(i,j,k)])*self.omega + self.A_0[(i,j,k)]", "def make_quad_gauss(lmax,alm):\n return libcurvedsky.bispec.make_quad_gauss(lmax,alm)", "def step(self):\n if self.defaults['max_grad_norm'] > 0:\n device = self.param_groups[0]['params'][0].device\n global_grad_norm = torch.zeros(1, device=device)\n\n max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device)\n for group in self.param_groups:\n\n for p in group['params']:\n if p.grad is not None:\n grad = p.grad\n global_grad_norm.add_(grad.pow(2).sum())\n\n global_grad_norm = torch.sqrt(global_grad_norm)\n\n clip_global_grad_norm = torch.clamp(max_grad_norm / (global_grad_norm + group['eps']), max=1.0)\n else:\n clip_global_grad_norm = 1.0\n\n for group in self.param_groups:\n beta1, beta2, beta3 = group['betas']\n # assume same step across group now to simplify things\n # per parameter step can be easily support by making it tensor, or pass list into kernel\n if 'step' in group:\n group['step'] += 1\n else:\n group['step'] = 1\n\n bias_correction1 = 1.0 - beta1 ** group['step']\n\n bias_correction2 = 1.0 - beta2 ** group['step']\n\n bias_correction3 = 1.0 - beta3 ** group['step']\n\n for p in group['params']:\n if p.grad is None:\n continue\n\n state = self.state[p]\n if len(state) == 0:\n state['exp_avg'] = torch.zeros_like(p)\n state['exp_avg_sq'] = torch.zeros_like(p)\n state['exp_avg_diff'] = torch.zeros_like(p)\n\n grad = p.grad.mul_(clip_global_grad_norm)\n if 'pre_grad' not in state or group['step'] == 1:\n state['pre_grad'] = grad\n\n copy_grad = grad.clone()\n\n exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_sq'], state['exp_avg_diff']\n diff = grad - state['pre_grad']\n\n update = grad + beta2 * diff\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t\n exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2) # diff_t\n exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3) # n_t\n\n denom = ((exp_avg_sq).sqrt() / math.sqrt(bias_correction3)).add_(group['eps'])\n update = ((exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2)).div_(denom)\n\n if group['no_prox']:\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\n p.add_(update, alpha=-group['lr'])\n else:\n p.add_(update, alpha=-group['lr'])\n p.data.div_(1 + group['lr'] * group['weight_decay'])\n\n state['pre_grad'] = copy_grad", "def GAStep(self):\n\n self.updateMatingPool()\n self.newGeneration()", "def __init__(self, sigmas, start=None):\n Proposal.GaussianProposal.__init__(self, unpackage(sigmas), start)", "def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [1./self.lengthscale, 1.]\r\n self.b = [1]\r\n\r\n self.basis_alpha = np.ones((self.n_basis,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] 
for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)", "def __init__(self, \n frequencyR, frequencyG, frequencyB,\n phaseR, phaseG, phaseB,\n widthR=127, widthG=127, widthB=127,\n minR=127, minG=127, minB=127, \n step=np.radians(30)):\n self.frequencyR = frequencyR\n self.frequencyG = frequencyG\n self.frequencyB = frequencyB\n self.phaseR = phaseR\n self.phaseG = phaseG\n self.phaseB = phaseB\n self.x = 0\n self.step = step\n self.widthR = widthR\n self.widthG = widthG\n self.widthB = widthB\n self.minR = minR\n self.minG = minG\n self.minB = minB", "def set_grating_va(self, g, wl=None):\n try:\n self.set_grating(g)\n # By default the Shamrock library keeps the same wavelength\n if wl is not None:\n self.set_wavelength(wl)\n except Exception:\n print(\"Failed to change grating to %d\", g)", "def __init__(self, **kwargs):\n GaussBeam.__init__(self, **kwargs)\n self.scale = kwargs.get('scale',10.)\n self.mass = kwargs.get('mass', 6.0)\n self.s0 = kwargs.get('s0', 7.0)\n self.retro = kwargs.get('retro', 1.0)\n self.alpha = kwargs.get('alpha', 1.0)\n self.Er0 = Erecoil( self.l , self.mass) \n self.mW = 1000 * (self.s0 * self.Er0 ) \\\n * np.abs( np.pi / 8. / uL(self.l) )\\\n * self.w[0]*self.w[1] / self.retro", "def gaussian(self, amp_step, sigma_step):\n l = len(self.overlaid_x_axis)\n x = np.linspace(0, l, l) - l/2 # centre of data\n\n # This is new code to 'guess' the size of the Gaussian from the\n # existing data rather than from hard-coded numbers.\n # TODO: test this! Possibly link up to the get_windowed_data function\n # as it uses a lot of the same functionality\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n amplitude = max(trace) + amp_step\n diff = np.diff(trigger)\n stepvalue = 0.5\n if min(diff) > -1 * stepvalue or max(diff) < stepvalue:\n raise RangeError\n else:\n maxtrig = next(x for x in diff if x > stepvalue)\n mintrig = next(x for x in diff if x < -1 * stepvalue)\n edges = [np.where(diff == maxtrig)[0][0],\n np.where(diff == mintrig)[0][0]]\n half_trigger_length = (edges[1]-edges[0])\n sigma = half_trigger_length/4 + sigma_step\n\n gauss = self.ax2.plot(amplitude * np.exp(-x**2 / (2 * sigma**2)), 'r')\n self.overlaid_lines.append(gauss)\n self.draw()", "def setGammaValueFromSpinBox(self):\n self.gammaSlider.setValue( self.gammaSpinBox.value * 1000)", "def setGammaValueFromSlider(self):\n self.gammaSpinBox.setValue( self.gammaSlider.value / 1000 )", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [3./self.lengthscale**2, 2*np.sqrt(3)/self.lengthscale, 1.]\r\n self.b = [1,self.lengthscale**2/3]\r\n\r\n self.basis_alpha = np.ones((self.n_basis,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] 
for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def step(self, step=None):\n\n self.qtime = -time.time()\n info(\"\\n Instanton optimization STEP %d\" % step, verbosity.low)\n\n if step == 0:\n info(\" @GEOP: Initializing INSTANTON\", verbosity.low)\n\n if self.beads.nbeads == 1:\n raise ValueError(\"We can not perform an splitting calculation with nbeads =1\")\n # get_hessian(self.hessian, self.gm, self.beads.q)\n else:\n if ((self.beads.q - self.beads.q[0]) == 0).all(): # If the coordinates in all the imaginary time slices are the same\n info(\" @GEOP: We stretch the initial geometry with an 'amplitud' of %4.2f\" % self.delta, verbosity.low)\n imvector = get_imvector(self.initial_hessian, self.beads.m3[0].flatten())\n for i in range(self.beads.nbeads):\n\n self.beads.q[i, :] += self.delta * np.cos(i * np.pi / float(self.beads.nbeads - 1)) * imvector[:]\n else:\n info(\" @GEOP: Starting from the provided geometry in the extended phase space\", verbosity.low)\n\n # Update positions and forces\n self.old_x[:] = self.beads.q\n self.old_u[:] = self.forces.pots\n self.old_f[:] = self.forces.f\n\n # This must be done after the stretching and before the self.d.\n if type(self.im.f) == type(None):\n self.im(self.beads.q, ret=False) # Init instanton mapper\n\n # Specific for LBFGS\n if np.linalg.norm(self.d) == 0.0:\n f = self.forces.f + self.im.f # ALBERTO1\n self.d += dstrip(f) / np.sqrt(np.dot(f.flatten(), f.flatten()))\n\n if (self.old_x == np.zeros((self.beads.nbeads, 3 * self.beads.natoms), float)).all():\n self.old_x[:] = self.beads.q\n\n if self.exit:\n softexit.trigger(\"Geometry optimization converged. Exiting simulation\")\n\n if len(self.fixatoms) > 0:\n for dqb in self.old_f:\n dqb[self.fixatoms * 3] = 0.0\n dqb[self.fixatoms * 3 + 1] = 0.0\n dqb[self.fixatoms * 3 + 2] = 0.0\n\n e, g = self.fm(self.beads.q)\n fdf0 = (e, g)\n\n # Do one step. Update hessian for the new position. 
Update the position and force inside the mapper.\n L_BFGS(self.old_x, self.d, self.fm, self.qlist, self.glist,\n fdf0, self.big_step, self.ls_options[\"tolerance\"] * self.tolerances[\"energy\"],\n self.ls_options[\"iter\"], self.corrections, self.scale, step)\n # ALBERTO2\n\n # Update positions and forces\n self.beads.q = self.gm.dbeads.q\n self.forces.transfer_forces(self.gm.dforces) # This forces the update of the forces\n\n # Exit simulation step\n d_x_max = np.amax(np.absolute(np.subtract(self.beads.q, self.old_x)))\n self.exit = self.exitstep(self.forces.pot, self.old_u.sum(), d_x_max, self.exit, step)\n\n # Update positions and forces\n self.old_x[:] = self.beads.q\n self.old_u[:] = self.forces.pots\n self.old_f[:] = self.forces.f\n\n # Print current instanton geometry and hessian\n if (self.save > 0 and np.mod(step, self.save) == 0) or self.exit:\n print_instanton_geo(self.prefix, step, self.im.dbeads.nbeads, self.im.dbeads.natoms, self.im.dbeads.names,\n self.im.dbeads.q, self.old_u, self.cell, self.energy_shift)", "def __setup_parameters__(self):\r\n self.M=self.N+1\r\n self.u=1+self.pu\r\n self.d=1-self.pd\r\n self.qu=(math.exp((self.r-self.div)*self.dt)-self.d)/(self.u-self.d)\r\n self.qd=1-self.qu", "def gauss(x, gamma):\n return 1 / np.sqrt(2*np.pi) / gamma * np.exp(-(x/gamma)**2 / 2)", "def step6_set_gan_params(params):\n global GAN_PARAMS\n GAN_PARAMS = {**GAN_PARAMS, **params}", "def _set_params(self,x):\r\n assert x.size==3\r\n self.variance = x[0]\r\n self.lengthscale = x[1]\r\n self.period = x[2]\r\n\r\n self.a = [5*np.sqrt(5)/self.lengthscale**3, 15./self.lengthscale**2,3*np.sqrt(5)/self.lengthscale, 1.]\r\n self.b = [9./8, 9*self.lengthscale**4/200., 3*self.lengthscale**2/5., 3*self.lengthscale**2/(5*8.), 3*self.lengthscale**2/(5*8.)]\r\n\r\n self.basis_alpha = np.ones((2*self.n_freq,))\r\n self.basis_omega = np.array(sum([[i*2*np.pi/self.period]*2 for i in range(1,self.n_freq+1)],[]))\r\n self.basis_phi = np.array(sum([[-np.pi/2, 0.] 
for i in range(1,self.n_freq+1)],[]))\r\n\r\n self.G = self.Gram_matrix()\r\n self.Gi = np.linalg.inv(self.G)", "def config_step_sweep(self):\n self.write(\":SOUR:FREQ:MODE SWE;\"\n \":SOUR:SWE:GEN STEP;\"\n \":SOUR:SWE:MODE AUTO;\")", "def FB(self):\n # The maximum update amount for these element\n StepLength_DELTA = self.dt * (self.StepLength_LIMITS[1] -\n self.StepLength_LIMITS[0]) / (6.0)\n StepVelocity_DELTA = self.dt * (self.StepVelocity_LIMITS[1] -\n self.StepVelocity_LIMITS[0]) / (2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n if self.StepLength < -self.StepLength_LIMITS[0] / 2.0:\n StepLength_DIRECTION = np.random.randint(-1, 3, 1)[0]\n elif self.StepLength > self.StepLength_LIMITS[1] / 2.0:\n StepLength_DIRECTION = np.random.randint(-2, 2, 1)[0]\n else:\n StepLength_DIRECTION = np.random.randint(-1, 2, 1)[0]\n StepVelocity_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.StepLength += StepLength_DIRECTION * StepLength_DELTA\n self.StepLength = np.clip(self.StepLength, self.StepLength_LIMITS[0],\n self.StepLength_LIMITS[1])\n self.StepVelocity += StepVelocity_DIRECTION * StepVelocity_DELTA\n self.StepVelocity = np.clip(self.StepVelocity,\n self.StepVelocity_LIMITS[0],\n self.StepVelocity_LIMITS[1])", "def __init__(self, num_hermgauss=20):\r\n super().__init__()\r\n\r\n gh_x, gh_w = np.polynomial.hermite.hermgauss(num_hermgauss)\r\n self.gh_x = torch.nn.Parameter(\r\n torch.from_numpy(gh_x[:, None, None].astype(NUMPY_DTYPE)),\r\n requires_grad=False)\r\n self.gh_w = torch.nn.Parameter(\r\n torch.from_numpy(gh_w[:, None, None].astype(NUMPY_DTYPE)),\r\n requires_grad=False)", "def bkg_with_gauss_shoulder(fiter):\n\n \n ###bkg pdf\n fiter.bkg_indx = RooRealVar(\"indx\", \"indx\", -0.001, -0.1, 0.)\n fiter.linbkg = RooExponential(\"expBkg\", \"Exponential background pdf\", fiter.mass, fiter.bkg_indx)\n \n \n ###Shoulder\n fiter.sh_mean = RooRealVar(\"sh_mean\", \"sh_mean\", 5279.5)\n fiter.sh_sigma = RooRealVar(\"sh_sigma\", \"sh_sigma\", 20)\n \n #fiter.sh_trans.setConstant(kTRUE)\n #fiter.sh_sigma.setConstant(kTRUE)\n #fiter.sh_mean.setConstant(kTRUE)\n \n #fiter.shoulder = RooExpAndGauss(\"shoulder\", \"shoulder pdf\", fiter.mass,fiter.sh_mean, fiter.sh_sigma, fiter.sh_trans)\n fiter.shoulder = RooGaussian(\"shoulder\",\"shoulder pdf\", fiter.mass, fiter.sh_mean, fiter.sh_sigma)\n #### model\n \n fiter.fsh = RooRealVar(\"fh\", \"shoulder fraction in background\", 0.5,0.,1.)\n fiter.bkg = RooAddPdf(\"bkg\", \"background pdf\", fiter.shoulder, fiter.linbkg, fiter.fsh)\n\n \n return 1", "def step(self, step=None):\n\n self.qtime = -time.time()\n info(\"\\n Instanton optimization STEP %d\" % step, verbosity.low)\n\n if step == 0:\n info(\" @GEOP: Initializing INSTANTON\", verbosity.low)\n\n if self.beads.nbeads == 1:\n info(\" @GEOP: Classical TS search\", verbosity.low)\n if self.hessian_init == 'true':\n get_hessian(self.hessian, self.gm, self.beads.q)\n else:\n if ((self.beads.q - self.beads.q[0]) == 0).all(): # If the coordinates in all the imaginary time slices are the same\n info(\" @GEOP: We stretch the initial geometry with an 'amplitud' of %4.2f\" % self.delta, verbosity.low)\n imvector = get_imvector(self.initial_hessian, self.beads.m3[0].flatten())\n for i in range(self.beads.nbeads):\n self.beads.q[i, :] += self.delta * np.cos(i * np.pi / float(self.beads.nbeads - 1)) * imvector[:]\n if self.hessian_init != 'true':\n info(\" @GEOP: Hessian_init isn't true but 
we have stretched the polymer so we are going to compute the initial hessian anyway.\", verbosity.low)\n self.hessian_init = 'true'\n else:\n info(\" @GEOP: Starting from the provided geometry in the extended phase space\", verbosity.low)\n if not (self.initial_hessian is None):\n raise ValueError(\" You have to provided a hessian with size (3xnatoms)^2 but also geometry in the extended phase space (nbeads>1). Please check the inputs\\n\")\n\n if self.hessian_init == 'true':\n info(\" @GEOP: We are computing the initial hessian\", verbosity.low)\n get_hessian(self.hessian, self.gm, self.beads.q)\n\n # Update positions and forces\n self.old_x[:] = self.beads.q\n self.old_u[:] = self.forces.pots\n self.old_f[:] = self.forces.f\n\n if type(self.im.f) == type(None):\n self.im(self.beads.q, ret=False) # Init instanton mapper\n\n if (self.old_x == np.zeros((self.beads.nbeads, 3 * self.beads.natoms), float)).all():\n self.old_x[:] = self.beads.q\n if self.exit:\n softexit.trigger(\"Geometry optimization converged. Exiting simulation\")\n\n if len(self.fixatoms) > 0:\n for dqb in self.old_f:\n dqb[self.fixatoms * 3] = 0.0\n dqb[self.fixatoms * 3 + 1] = 0.0\n dqb[self.fixatoms * 3 + 2] = 0.0\n\n # Do one step. Update hessian for the new position. Update the position and force inside the mapper.\n Instanton(self.old_x, self.old_f, self.im.f, self.hessian, self.hessian_update, self.hessian_asr, self.im, self.gm, self.big_step, self.opt, self.mode)\n\n # Update positions and forces\n self.beads.q = self.gm.dbeads.q\n self.forces.transfer_forces(self.gm.dforces) # This forces the update of the forces\n\n # Print current instanton geometry and hessian\n if (self.save > 0 and np.mod(step, self.save) == 0) or self.exit:\n print_instanton_geo(self.prefix, step, self.im.dbeads.nbeads, self.im.dbeads.natoms, self.im.dbeads.names,\n self.im.dbeads.q, self.old_u, self.cell, self.energy_shift)\n print_instanton_hess(self.prefix, step, self.hessian)\n\n # Exit simulation step\n d_x_max = np.amax(np.absolute(np.subtract(self.beads.q, self.old_x)))\n self.exit = self.exitstep(self.forces.pot, self.old_u.sum(), d_x_max, self.exit, step)\n\n # Update positions and forces\n self.old_x[:] = self.beads.q\n self.old_u[:] = self.forces.pots\n self.old_f[:] = self.forces.f", "def gauss(x, x0, gamma):\n sigma = gamma / sqrt(2.0)\n \n A = 1/ (sigma * sqrt(2*pi))\n return (A * exp (-0.5 * (x-x0)**2/sigma**2))", "def phi_gauss(self,x,i):\n s = 0.1\n return np.exp(-(x-self.mu[i])**2/(2*s))", "def set_IStep(self, istep = [0], istep_sigma = [0], tstep = 5, tdur = 1e6, give_freq = True):\n if self.do_run:\n #for m in self.ic_steps:\n # m.destroy()\n # del m \n #del self.ic_steps\n \n #self.ic_steps = []\n \n istep = list(istep)\n neg = False\n \n for n in range(self.n_celltypes):\n \n if istep[n] < 0: \n neg = True\n istep[n] = abs(istep[n]) # make positive again\n \n if istep[n] != 0:\n if give_freq is True:\n a = np.array([istep[n]])\n iin = self.get_i(a, n)[0]\n if self.id == 0: print \"celltype: \", n, \" istep: \", istep[n], \"Hz => \", iin, \" nA\"\n istep[n] = iin \n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n np.random.seed(gid*30)\n \n if self.i_holdrs == []:\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n], istep[n]*istep_sigma[n], 1).clip(min=0)\n else: # same ihold for all cells!\n istep_r = istep[n]\n \n else: # ihold has been set!\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n]-self.i_holds[n], 
(istep[n]-self.i_holds[n])*istep_sigma[n], 1).clip(min=0) # delta now! put on top of hold!\n else: # same ihold for all cells!\n istep_r = istep[n]-self.i_holds[n] # delta now! put on top of hold!\n \n if neg:\n istep_r = -1*istep_r\n \n if istep[n] == 0:\n istep_r = -1*self.i_holdrs[n][i] \n \n #print 'is:' + str(istep_r) + 'was:' + str(self.i_holdrs[n][i])\n \n if istep_r != 0: \n # step current\n ic_step = h.IClamp(self.cells[n][i].soma(0.5))\n ic_step.delay = tstep/ms\n ic_step.dur = tdur/ms\n ic_step.amp = istep_r/nA\n self.ic_steps.append(ic_step)\n \n \n if self.id == 0: print \"set_IStep finished. istep: \", istep, \", istep_sigma: \", istep_sigma", "def setUp(self):\n shape = RNG.integers(5, 50)\n periods = self.periods = RNG.normal() * 3\n freq = periods / shape\n amp = RNG.normal()\n offset = RNG.normal()\n phase = (RNG.normal() - 1 / 2) * 5 / 3 * np.pi\n p_gt = self.p_gt = (amp, freq, phase, offset)\n x = self.x = np.arange(shape)\n self.data = sine(x, *p_gt)", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def ramp6(params, phase, args=dict(n=4, guess=[1, 0.053, 0.0040 , 0.4])):\n # 2013-12-07 14:08 IJMC: Created.\n\n if params[3]>=phase.min():\n params[3] = phase.min() - np.diff(phase).mean()/1e6\n \n return params[0] * (1. + params[1] * (phase - 0.5) + params[2] * np.log(phase - params[3]))", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def cmd_gaus():\n cmds = []\n cmds.append(\"r_m[0.0,-1,1]\")\n cmds.append(\"r_s[2.5,0,10]\")\n cmds.append('Gaussian::res(x,r_m,r_s)')\n return cmds", "def sequence_params(self):", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")", "def gauss_wp(self, delta, x0=0):\n constant = (delta/np.pi)**(1/4) * (1.+0.j) \n wp = constant * np.exp((-0.5 * delta * np.square(self.grid_x - x0)))\n return wp", "def _change_spacing(self, **kwargs):\n start_point = kwargs.get(\"start_point\")\n end_point = kwargs.get(\"end_point\")\n self.t[start_point:end_point] *= kwargs.get(\"gamma\")\n self._base(**kwargs)", "def process_refine_step(self, sample, integral, integral_var, **kwargs):", "def qgset(x):\n return 0.2855*x - 0.8565", "def gamma(self):\r\n raise NotImplementedError('not implemented yet, will use spouge approximation')", "def batch_integrate_gauss(self, f, mu, sig, viewAs=None):\n x = (self.gh_s\n 
.unsqueeze(-1)\n .expand(-1, mu.size(0))\n .mul(torch.tensor(2., device=self.deg.device).sqrt())\n .mul(sig.view(1,-1))\n + mu.view(1,-1)\n )\n \n viewAs = viewAs if viewAs is not None else [-1, 1]\n \n return (self.gh_w.view(*viewAs)\n .mul(f(x))\n .mul(torch.tensor(1./math.pi, device=self.deg.device).sqrt())\n .sum(0)\n )", "def test_ln_gauss(self):\n Q = likelihood()\n test_object = test_values_for_likelihood()\n experimental_values = [0.9,2.1,3.2]\n errors = [0.5,0.4,0.3]\n reference_q_value = 0.27347222222222267\n reference_q_derivative = np.array([-3.75833333, -3.75833333])\n Q.add_observable(test_object,experimental_values,errors,scale=1.0)\n q_value, q_derivative = Q.compute_ln_gauss(np.array([1.0,1.0]))\n assert np.isclose(q_value,reference_q_value)\n assert np.all(np.isclose(q_derivative,reference_q_derivative))\n return", "def importance_sampling_step(self, positions, analytic):\n \"\"\"With upgrad method for suggetion of new positions.\"\"\"\n \"\"\"Given through the Langevin equation.\n D is the diffusion coefficient equal 0.5, xi is a gaussion random\n variable and delta_t is the time step between 0.001 and 0.01\"\"\"\n\n D = 0.5\n greens_function = 0.0\n\n if analytic == True:\n F_old = self.w.quantum_force(positions)\n else:\n F_old = self.w.quantum_force_numerical(positions)\n\n # r = random.random()*random.choice((-1, 1))\n # r = np.random.normal()\n r = np.zeros(self.num_d)\n for i in range(self.num_d):\n r[i] = random.gauss(0, 1)\n # Pick a random particle and calculate new position\n random_index = random.randrange(len(positions))\n new_positions = np.array(positions)\n\n term1 = D*F_old[random_index, :]*self.delta_t\n term2 = r*np.sqrt(self.delta_t)\n new_random_position = new_positions[random_index, :] + term1 + term2\n new_positions[random_index, :] = new_random_position\n\n # Check if wave function is zero\n test_wavefunction = self.w.wavefunction(new_positions)\n if test_wavefunction**2 <= 1e-14:\n pass\n else:\n prob_ratio = self.w.wavefunction_ratio(positions, new_positions)\n\n if analytic == True:\n F_new = self.w.quantum_force(new_positions)\n else:\n F_new = self.w.quantum_force_numerical(new_positions)\n\n for i in range(self.num_p):\n for j in range(self.num_d):\n term1 = 0.5*((F_old[i, j] + F_new[i, j]) *\n (positions[i, j] - new_positions[i, j]))\n term2 = D*self.delta_t*(F_old[i, j] - F_new[i, j])\n greens_function += term1 + term2\n\n greens_function = np.exp(greens_function)\n\n epsilon = np.random.sample()\n acceptance_ratio = prob_ratio*greens_function\n\n if acceptance_ratio > epsilon:\n positions = new_positions\n self.s.distances_update(positions, random_index)\n self.c += 1.0\n\n else:\n pass\n\n return positions", "def update_step_size(self):\n self.setSingleStep(10 ** self.step_exponent)\n self.update_format_string()", "def reset_parameters(self):\n mu_range = 1 / math.sqrt(self.in_features)\n self.weight_mu.data.uniform_(-mu_range, mu_range)\n self.weight_sigma.data.fill_(\n self.std_init / math.sqrt(self.in_features)\n )\n self.bias_mu.data.uniform_(-mu_range, mu_range)\n self.bias_sigma.data.fill_(\n self.std_init / math.sqrt(self.out_features)\n )", "def corun(self, step:int=0):\n if step > 0:\n _range = range(self.current_step, self.current_step + step + 1)\n else: # run forever\n _range = itertools.count(self.current_step)\n for step_num in _range:\n self.config_template = (yield self.step()) or self.config_template", "def set_gauss(fwhm):\n\n sigma = float(fwhm) / 2.3548\n\n op_string = \"-kernel gauss %f -fmean -mas \" % sigma + 
\"%s\"\n\n return op_string", "def set_gauss(fwhm):\n\n sigma = float(fwhm) / 2.3548\n\n op_string = \"-kernel gauss %f -fmean -mas \" % sigma + \"%s\"\n\n return op_string", "def __init__(self, quantity, dist_weights, gauss_params, upper_bound, lower_bound):\n self.dist_weights = dist_weights\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n if len(self.dist_weights) != len(gauss_params):\n print(\n \"Number of distribution weights do not match number of distributions!\"\n )\n diff = len(gauss_params) - len(dist_weights)\n if diff < 0:\n print(\"Ignoring trailing distribution weights\")\n self.dist_weights = self.dist_weights[: len(dist_weights) + diff]\n else:\n print(\"Assuming default weights of 1\")\n self.dist_weights.extend([1] * diff)\n # normalize weights\n self.dist_weights = np.array(\n [float(i) / sum(self.dist_weights) for i in self.dist_weights]\n )\n # create samples\n self.samples = []\n self.gauss_params = gauss_params\n sample_size = quantity\n self.sample_min, self.sample_max = [float(\"inf\"), -float(\"inf\")]\n while True:\n # determine the gaussian to sample from for each sample\n mixture_idx = np.random.choice(\n len(self.dist_weights),\n size=sample_size,\n replace=True,\n p=self.dist_weights,\n )\n # create the samples from the respective gaussian\n temp = np.fromiter(\n (ss.norm.rvs(*(gauss_params[i])) for i in mixture_idx), dtype=np.float64\n )\n # remember mixed sampled extremas for plotting\n self.sample_min = min(self.sample_min, temp.min())\n self.sample_max = max(self.sample_max, temp.max())\n # add those samples that are within the bounds\n self.samples = np.concatenate(\n [\n self.samples,\n np.fromiter(\n [x for x in temp if x <= upper_bound and x >= lower_bound],\n dtype=np.float64,\n ),\n ]\n )\n sample_size = quantity - len(self.samples)\n if sample_size == 0:\n break", "def task2_extra2():\n N = 0\n lam = 0\n L = 10\n h = 0.001\n tau = 0.000099\n aa = numpy.array([0.25*a for a in range((L-1)*4)])\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n Vm = V1D(lam, x)\n # eps=int(0.1*len(x))\n\n iterss = []\n for a in aa:\n print(a)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center ($L={}$)\".format(L))\n plt.xlabel(\"$a$\")\n plt.ylabel(\"Time\")\n plt.plot(aa, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel_fixedL={}.pdf\".format(L), bbox_inches=\"tight\")", "def gain_opt(machine, T):\n res = (np.arange(T)+1)\n return res * np.amax(machine)", "def __init__(self,start,step,n_up=3,n_down=1,harder=-1,ub=1,lb=0):\n \n self.value = start\n self.n_up = n_up\n self.step = step\n self.n = 0 #This is what will be compared to n_up for udpating.\n self.harder = np.sign(harder) #Make sure that this is only -1 or 1.\n self.record = [start]\n self.correct = []\n self.ub = ub\n self.lb = lb", "def _eta_sfr_scaling(self,x,q):\n i = self.enum[q]\n A = self.scaling_params['A'][i]\n b = self.scaling_params['b'][i]\n return A*x**b", "def __init__(self, step_time, step_interval=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.interval = step_interval\n self.step_time = step_time", "def setseq():\n\n if seqRnd[0]:\n r = getrndseq(seqRndWeight)\n if seqRnd[0] 
== 1:\n gbl.seqCount = r\n r = -1\n else:\n r = -1\n\n return ( r, seqRnd[1:] )", "def step(self):\n\n with torch.no_grad():\n for group in self.param_groups:\n lr = group[\"lr\"]\n for p in group[\"params\"]:\n\n if p.grad is None:\n continue\n\n lambda_square = self.mf.conf_factor(p, keepdim=True) ** 2\n p.data.copy_(self.mf.exp(p, -lr * p.grad.data / lambda_square))", "def __init__(self):\n self.grad = 0.0", "def mutate_trace_gauss_saes(solver, t, sds, mutation_chance=None):\n if mutation_chance is None:\n mutation_chance = solver.alg_params.mutation_rate\n\n p = t.trace\n for i in range(len(p)):\n r = random.random()\n if r < mutation_chance:\n # p[i] = sample_gauss(p[i], sds[i])\n # p[i] = p[i] + sds[i] * random.gauss(0, 1)\n # p[i] = max(0, min(p[i], 1))\n p[i] = get_trunc_norm(p[i], sds[i])\n t2 = solver.create_solution_with_raw_trace(p)\n t2.es_params = fix_saes_params(solver, t2.trace, t.es_params)\n return t2", "def M_g(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating g\", file=self.logfile)\n M_mu1 = np.lib.stride_tricks.as_strided(self.mu_pad,\n shape=[self.P+1, self.L_h],\n strides=[self.mu_pad.strides[-1], self.mu_pad.strides[-1]])\n\n M_mu1 = M_mu1[::-1,:]\n M_mu2 = np.transpose(M_mu1[1:,:])\n M_mu1 = M_mu1*self.e2\n\n M_mu = np.dot(M_mu1, M_mu2)\n v_mu = M_mu[0,:]\n M_mu = M_mu[1:,:]\n\n M_R = np.zeros((self.P,self.P+1))\n for p in range(1,self.P+1):\n for q in range(0,self.P+1):\n M_R[p-1,q] = np.sum(np.diag(self.R, q-p)[:self.L_h-max(p,q)]*self.e2[max(p,q):self.L_h])\n\n v_R = M_R[:,0]\n M_R = M_R[:,1:]\n\n self.alpha_g = np.dot(np.linalg.inv(M_mu + M_R), v_mu+v_R)\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self._propagate_A()", "def _gammaParameter(self, t : float, dtau : float) -> float:\n pass", "def kgs_changed(self):\n lbs = round(self.spinKgs.value() * 2.20462, 1)\n self.spinLbs.setValue(lbs)", "def reset_parameters_xavier_uniform(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def reset_parameters_xavier_uniform(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def reset_parameters(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def reset_parameters(self):\n logger.info('===== Initialize %s with Xavier uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_xavier_uniform(n, p)", "def reset_parameters(self) -> None:\n nn.init.uniform_(self.start_transitions, -0.1, 0.1)\n nn.init.uniform_(self.end_transitions, -0.1, 0.1)\n nn.init.uniform_(self.transitions, -0.1, 0.1)", "def reset_parameters(self) -> None:\n nn.init.uniform_(self.start_transitions, -0.1, 0.1)\n nn.init.uniform_(self.end_transitions, -0.1, 0.1)\n nn.init.uniform_(self.transitions, -0.1, 0.1)", "def run(self,step=2,\n sizePop=100,\n infoFields=['migrate_to','fitness'],\n recombination_rate = 0.00375,\n migration_rate = 0.01,\n mutation_rate = [0.00000001],\n subPopNames = ['x','y','z','w'],\n alleleNames = ['A','B'],\n s1 = 0.1,\n burnin=50,\n **kargs):\n\n self.reset()\n pop=sim.Population(size=[sizePop]*self.numPop, loci=self.loci, lociPos=list(range(self.dist, (self.dist*self.loci)+1,self.dist)), subPopNames=subPopNames, 
infoFields=infoFields)\n \n simu = sim.Simulator(pop)\n print(\"The simulation has started\")\n t1 = time.time()\n\n\n mutate_snps=range(0,50)+range(51,101)\n\n # define the initialization of each loci based the beta distribution where a and b parameters are allele frequencies from noncoding human regions\n snps=[0.14, 0.11, 0.17, 0.11, 0.32, 0.33, 0.21, 0.11, 0.11, 0.28, 0.11, 0.12, 0.8, 0.66, 0.74, 0.68, 0.66, 0.77, 0.77, 0.76, 0.77, 0.74, 0.72, 0.11, 0.73, 0.72, 0.72, 0.72, 0.54, 0.17, 0.78, 0.64, 0.78, 0.2, 0.24, 0.25, 0.78, 0.66, 0.2, 0.14, 0.75, 0.16, 0.72, 0.18, 0.77, 0.42, 0.34, 0.7, 0.17, 0.14, 0.2, 0.46, 0.13, 0.26, 0.16, 0.13, 0.14, 0.24, 0.18, 0.36, 0.71, 0.27, 0.28, 0.25, 0.25, 0.3, 0.19, 0.14, 0.16, 0.3, 0.39, 0.16, 0.24, 0.32, 0.11, 0.18, 0.48, 0.31, 0.21, 0.15, 0.34, 0.71, 0.33, 0.18, 0.71, 0.13, 0.23, 0.2, 0.22, 0.23, 0.16, 0.23, 0.23, 0.22, 0.24, 0.82, 0.36, 0.37, 0.72, 0.16, 0.14]\n self.initFreq=[]\n\n \n for i in range(len(snps)):\n alpha=float(4*sizePop*migration_rate*snps[i])\n bhta=float(4*sizePop*migration_rate*(1-snps[i])) \n p=numpy.random.beta(alpha,bhta)\n while (p>=0.9 or p<=0.1):\n p=numpy.random.beta(alpha,bhta)\n \n print \" SNP {snp} with alpha {alpha}, bhta {bhta} and frequency {p}\".format(snp=i, alpha=alpha, bhta=bhta, p=p)\n self.initFreq.append(p)\n\n simu.evolve(\n \n initOps=[sim.InitGenotype(freq=[self.initFreq[i], 1-self.initFreq[i]], loci=i) for i in range(len(snps))],\n \n\n # initialize the sex and select the 50 loci (parents)\n preOps = [sim.InitSex(maleProp=0.5,at=[0]),\n\n # initialize the genotype of locus 50 at generation 0 (in the beginning of the simulation)\n sim.PyOperator(self.genotypeBegin,at=[0]),\n \n # Wait 50 generations for the system to reach equilibrium\n # Then, change the the genotype of locus 50 at generation 50 by inserting a single copy of allele 0 in one individual \n sim.PyOperator(self.genotypeAfter,at=[50]),\n\n # function that carries out the selection proccess\n sim.MaSelector(loci=50,wildtype=0,fitness=[1+s1, 1+s1/2, 1],begin=50, end=-1,subPops=1)],\n\n # recombination\n matingScheme=sim.RandomMating(ops=[\n sim.Recombinator(rates=recombination_rate)]),\n \n # mutation and migration of offsprings\n postOps = [\n\n \n sim.SNPMutator(u=mutation_rate,loci=mutate_snps),\n \n # call function to calculate Fst and check for equilibrium state\n sim.PyOperator(self.calcFst,step=step),\n\n #migration\n # Here we define an island model, but this can easily be changed.\n # For more information about the migration models, please look in the documentation of SimuPOP here http://simupop.sourceforge.net/manual_svn/build/userGuide_ch7_sec3.html\n sim.Migrator(sim.utils.migrIslandRates(migration_rate,self.numPop)),\n \n # call function to save the allele frequencies\n sim.PyOperator(self.checkAlleles, step=step, param = subPopNames),\n \n \n # check if locus 50 is lost due to genetic drift. 
If yes, we terminate the simulation\n sim.Stat(alleleFreq=50,step=step,subPops=1,begin=50,end=-1),\n sim.TerminateIf('alleleFreq[50][0] == 0',step=step,begin=50,end=-1),\n \n # check the progress of the simulation\n sim.PyEval('\"Gen: %d\" % gen',step=step),\n sim.PyOutput('\\n',step=step),\n \n ],\n gen=self.Gen\n \n )\n \n \n t2 = time.time()\n print \"simulation took\", t2-t1, \"seconds.\"", "def set_gamma(self, name):\n if name=='1H':\n self.gamma=267.513e6*self.time_fact\n #In standard freq: 42.576e6\n elif name=='13C':\n self.gamma=67.262e6*self.time_fact\n # Good average unknown\n self.csa=-130e-6\n elif name=='15N':\n self.gamma=-27.116e6*self.time_fact\n # Large variation possible. See Fushman, Tjandra, and Cowburn, 1998.\n # Also use more commonly accepted value.\n self.csa=-170e-6\n elif name=='17O':\n self.gamma=-36.264e6*self.time_fact\n elif name=='19F':\n self.gamma=251.662e6*self.time_fact\n elif name=='31P':\n self.gamma=108.291e6*self.time_fact", "def gauss(x, *p):\n A, mu, sigma = p\n\n return A*np.exp(-(x-mu)**2/(2.*sigma**2))", "def _tune(acc_rate, proposed, step):\n if step.tune_scaling:\n # a and b after Muto & Beck 2008.\n a = 1 / 9\n b = 8 / 9\n step.scaling = (a + b * acc_rate) ** 2\n if step.tune_steps:\n acc_rate = max(1.0 / proposed, acc_rate)\n step.n_steps = min(step.max_steps, 1 + int(np.log(step.p_acc_rate) / np.log(1 - acc_rate)))", "def regular(step, start=0.):\n\n def output(low, high):\n newstart = math.ceil((low - start)/step) * step + start\n return numpy.arange(newstart, high, step, dtype=numpy.float)\n output.func_name = \"regular(%g, start=%g)\" % (step, start)\n return output", "def initialize_cell_cycles(self,g_av=1,g_sig=0.2):\n # self.tc0 = np.random.uniform(0,1,self.nc)\n self.g_av = g_av\n self.g_sig = g_sig\n self.tc = np.random.uniform(0,1,self.nc)\n self.g = np.random.normal(self.g_av,self.g_sig,self.nc)", "def set_sampling(self, fgrid=0):\n self.FGRID = fgrid", "def __init__(self, initial_value, n_values, schedule):\n self.step = 0.\n self.initial_value = initial_value\n self.nvalues = n_values\n self.schedule = SCHEDULES[schedule]", "def __init__(self, GeneratingUnit=None, *args, **kw_args):\n self._GeneratingUnit = None\n self.GeneratingUnit = GeneratingUnit\n\n super(GrossToNetActivePowerCurve, self).__init__(*args, **kw_args)", "def spin_gap(B, g_ex):\n\n return mu_bohr * g_ex * B / k_b", "def __init__(self, routine, *args, **kwargs):\n\n self.kw = kwargs\n Step.__init__(self, routine=routine, *args, **kwargs)\n qscale_settings = self.parse_settings(self.get_requested_settings())\n qbcal.QScale.__init__(self, dev=self.dev, **qscale_settings)", "def uniformSample (self) :\n S = self.mdp.S\n A = self.mdp.A\n\n for s, a in product(range(S), range(A)):\n s_, self.R[s, a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)", "def gibbs_init(self, sigma2_s_param=None, sigma2_g_param=None):\n #Gibbs : Initialization step\n self.gibbs_init_step(self.nb_days, self.nb_particles, sigma2_s_param, sigma2_g_param)\n\n #Gibbs : step t > 0\n for j in range(1, self.nb_particles):\n if(j%(self.nb_particles/10)==0 or j==1):\n print(\"Gibbs sampling for particle \" + str(j) + \"/\" + str(self.nb_particles))\n\n\n self.s[:,j] = self.s[:,j-1]\n self.g_heat[:,j] = self.g_heat[:,j-1]\n self.sigma_s_star_2[:,j] = self.sigma_s_star_2[:,j-1]\n self.sigma_g_star_2[:,j] = self.sigma_g_star_2[:,j-1]\n\n # Compute s[0] for particle j (j>0)\n self.compute_s_0(j)\n\n # Compute s[n] for particle j (n>0 and j>0)\n for i in range(1, self.nb_days):\n 
self.compute_s(i,j)\n\n # Compute g_heat[O] for particle j (and j>0)\n self.compute_g_0(j)\n\n # Compute g_heat[n] for particle j (n>0 and j>0)\n for i in range(1, self.nb_days):\n self.compute_g(i,j)\n\n shape = 0.01 + ((self.nb_days - 1)/2)\n # Compute the new sigma_s_star2 for particle j (j>0) (follow Inverse Gamma)\n self.sigma_s_star_2[0, j] = self.compute_sigma_star_2(shape, self.s, j)\n\n # Compute the new sigma_g_star2 for particle j (j>0) (follow Inverse Gamma)\n self.sigma_s_star_2[0, j] = self.compute_sigma_star_2(shape, self.g_heat, j)\n\n #Compute x\n self.compute_x()\n #Compute w\n self.compute_w()", "def gauss_rule(iel, elemType, normal_order, element):\n\n #sctr = element[iel, :] # element connectivity\n\n if ((elemType == 'Q4') and (normal_order <8)):\n W, Q = gpw.gauss_pt_wt(normal_order,'GAUSS',2)\n return W, Q", "def gauss(x,p):\n return np.exp((-(x - p[0])**2) / (2 * p[1]**2))", "def param_gauss(xdata_, *params_):\n scale_, mean_, cov_ = params_to_scale_mean_cov(params_)\n return scale_ * gaussian(xdata_, mean=mean_, cov=cov_)", "def __init__(self, low, high, step_name, variable_name):\n super().__init__(step_name, variable_name, sp_uniform(low, high - low))\n self.low = min(low, high)\n self.high = max(low, high)", "def step_adjust_learning_rate(optimizer, lr0, step, step_size, gamma):\n\n if len(step_size) == 0:\n lr = lr0 * (gamma ** (step // step_size))\n else:\n lr = lr0 * gamma ** (sum([step > i for i in step_size]))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return lr", "def gaussKern(size):\n size = int(size)\n x, y = np.mgrid[-size:size + 1, -size:size + 1]\n g = np.exp(-(x**2/float(size) + y**2/float(size)))\n return g / g.sum()", "def __init__(self, low=0.0, alpha=0.0, beta=1.0):\n super().__init__()\n self.low = low\n self.alpha = alpha\n self.beta = beta\n self.type = 'Gamma'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('Laguerre')\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'", "def __init__(self, step_time, saw_time, step_interval=None, n_step=None, ss=None):\n self.ss = ss\n self.n_step = n_step\n self.interval = step_interval\n self.step_time = step_time\n self.saw_time = saw_time", "def initializeDistribution(self):\n self.convertToDistrDict['Laguerre'] = self.convertLaguerreToGamma\n self.convertToQuadDict ['Laguerre'] = self.convertGammaToLaguerre\n self.measureNormDict ['Laguerre'] = self.stdProbabilityNorm\n if (not self.upperBoundUsed):\n # and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low)\n #self.lowerBoundUsed = 0.0\n self.upperBound = sys.float_info.max\n self.preferredQuadrature = 'Laguerre'\n self.preferredPolynomials = 'Laguerre'\n else:\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'Legendre'\n if self.lowerBoundUsed == False:\n a = 0.0\n self.lowerBound = a\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n self.upperBound = b\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicGammaDistribution(self.alpha,1.0/self.beta,self.low,a,b)", "def adjust_learning_rate(optimizer, gamma, step):\n lr = args.lr * (0.8 ** step)\n print(lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr", "def _set_steps(self, bounds, steps):\n if type(steps) == int:\n self.steps = [np.linspace(b1,b2,steps) for b1,b2 in bounds]\n elif 
type(steps) == list and type(steps[0]) == int:\n self.steps = [np.linspace(b1, b2, s) for (b1, b2), s in zip(bounds, steps)]\n else:\n self.steps = steps.copy()", "def train_step(self, experiences, gamma):\n raise NotImplementedError", "def build_gp(amplitude, length_scale):\n kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)\n return tfd.GaussianProcess(kernel=kernel, index_points=x, observation_noise_variance=0.0)", "def __init__(self, policy, base_rate, gamma, power, max_steps, step_values):\n self.policy = policy\n self.base_rate = base_rate\n self.gamma = gamma\n self.power = power\n self.max_steps = max_steps\n self.step_values = step_values\n if self.step_values:\n self.stepvalues_list = map(float, step_values.split(','))\n else:\n self.stepvalues_list = []\n\n if (self.max_steps < len(self.stepvalues_list)):\n self.policy = 'step'\n self.stepvalues_list[0] = 1\n logging.info(\"Maximum iterations (i.e., %s) is less than provided step values count \"\n \"(i.e, %s), so learning rate policy is reset to (%s) policy with the \"\n \"step value (%s).\",\n self.max_steps, len(self.stepvalues_list),\n self.policy,\n self.stepvalues_list[0])\n else: # Converting stepsize percentages into values\n for i in range(len(self.stepvalues_list)):\n self.stepvalues_list[i] = round(self.max_steps * self.stepvalues_list[i] / 100)\n # Avoids 'nan' values during learning rate calculation\n if self.stepvalues_list[i] == 0:\n self.stepvalues_list[i] = 1\n\n if (self.policy == 'step') or (self.policy == 'sigmoid'):\n # If the policy is not multistep, then even though multiple step values\n # are provided as input, we will consider only the first value.\n self.step_size = self.stepvalues_list[0]\n elif (self.policy == 'multistep'):\n self.current_step = 0 # This counter is important to take arbitary steps\n self.stepvalue_size = len(self.stepvalues_list)", "def custom_init(v_init=-60.):\n inittime = -1e10\n tdt = neuron.h.dt # save current step size\n dtstep = 1e9\n neuron.h.finitialize(v_init) \n neuron.h.t = inittime # set time to large negative value (avoid activating\n # point processes, we hope)\n tmp = neuron.h.cvode.active() # check state of variable step integrator\n if tmp != 0: # turn off CVode variable step integrator if it was active\n neuron.h.cvode.active(0) # now just use backward Euler with large step\n neuron.h.dt = dtstep\n n = 0\n while neuron.h.t < -1e9: # Step forward\n neuron.h.fadvance()\n n += 1\n #print('advances: ', n)\n if tmp != 0:\n neuron.h.cvode.active(1) # restore integrator\n neuron.h.t = 0\n if neuron.h.cvode.active():\n neuron.h.cvode.re_init() # update d(state)/dt and currents\n else:\n neuron.h.fcurrent() # recalculate currents\n neuron.h.frecord_init() # save new state variables\n neuron.h.dt = tdt # restore original time step", "def set_params(self):\n max_margin = int(self.alpha) + 1\n self.sample_params['add'] = [0, max_margin, max_margin]", "def gauss(x, *p):\n mu, sigma = p\n return (1 / (sigma * np.sqrt(2 * np.pi)) *\n np.exp(-(x - mu) ** 2 / (2. 
* sigma ** 2)))", "def __init__(self, initial_step_size=0.01 * units.angstroms):\n\n timestep = 1.0 * units.femtoseconds\n super(GradientDescentMinimizationIntegrator, self).__init__(timestep)\n\n self.addGlobalVariable(\"step_size\", initial_step_size / units.nanometers)\n self.addGlobalVariable(\"energy_old\", 0)\n self.addGlobalVariable(\"energy_new\", 0)\n self.addGlobalVariable(\"delta_energy\", 0)\n self.addGlobalVariable(\"accept\", 0)\n self.addGlobalVariable(\"fnorm2\", 0)\n self.addPerDofVariable(\"x_old\", 0)\n\n # Update context state.\n self.addUpdateContextState()\n\n # Constrain positions.\n self.addConstrainPositions()\n\n # Store old energy and positions.\n self.addComputeGlobal(\"energy_old\", \"energy\")\n self.addComputePerDof(\"x_old\", \"x\")\n\n # Compute sum of squared norm.\n self.addComputeSum(\"fnorm2\", \"f^2\")\n\n # Take step.\n self.addComputePerDof(\"x\", \"x+step_size*f/sqrt(fnorm2 + delta(fnorm2))\")\n self.addConstrainPositions()\n\n # Ensure we only keep steps that go downhill in energy.\n self.addComputeGlobal(\"energy_new\", \"energy\")\n self.addComputeGlobal(\"delta_energy\", \"energy_new-energy_old\")\n # Accept also checks for NaN\n self.addComputeGlobal(\"accept\", \"step(-delta_energy) * delta(energy - energy_new)\")\n\n self.addComputePerDof(\"x\", \"accept*x + (1-accept)*x_old\")\n\n # Update step size.\n self.addComputeGlobal(\"step_size\", \"step_size * (2.0*accept + 0.5*(1-accept))\")" ]
[ "0.6018765", "0.57911164", "0.5601025", "0.55696154", "0.5559649", "0.55246335", "0.55087584", "0.55051965", "0.5472542", "0.5460051", "0.5456584", "0.5456021", "0.5453433", "0.54386294", "0.5438098", "0.541166", "0.53872263", "0.5383193", "0.53732735", "0.5372562", "0.5364542", "0.5358234", "0.5343056", "0.5339872", "0.5311707", "0.5285264", "0.52737737", "0.52731436", "0.52537537", "0.5246283", "0.5236963", "0.5230409", "0.5229118", "0.5225128", "0.5176369", "0.51625586", "0.5153728", "0.5149973", "0.51462", "0.51444733", "0.5131565", "0.512598", "0.51163775", "0.5111873", "0.5107915", "0.5106734", "0.5106668", "0.5064345", "0.50625634", "0.50625634", "0.5052902", "0.50383115", "0.5035673", "0.50324476", "0.5030275", "0.50285774", "0.5002253", "0.49950504", "0.49944335", "0.49812308", "0.49765062", "0.49746084", "0.4974507", "0.49687734", "0.49687734", "0.49663186", "0.49663186", "0.49588075", "0.49588075", "0.49539787", "0.4950228", "0.49481994", "0.494712", "0.49447575", "0.49387115", "0.4933357", "0.4931797", "0.49295625", "0.49280927", "0.49275082", "0.49215186", "0.49175835", "0.49098492", "0.49065867", "0.49051654", "0.49012256", "0.4901049", "0.48935264", "0.48871073", "0.48848784", "0.48829275", "0.4878583", "0.48705882", "0.48668724", "0.486585", "0.48615938", "0.4860804", "0.48595002", "0.48558065", "0.48555443" ]
0.5731074
2
Generate a Gauss sequence
def out(self, t: any, dim=(None, None)) -> any:
    step_vector = np.abs([round(gauss(self.mu, self.sigma), 1) for _ in range(self.n_step)])
    u = np.zeros(shape=dim)
    j = 0
    for i in range(len(t)):
        # Excluding the last point
        if t[i] % self.step_time == 0 and t[i] != 0 and j+1 != len(step_vector) and i != len(t)-1:
            # No last step
            j += 1
        if self.ss is not None and j == 0:
            u[i, :] = self.ss
        else:
            u[i, :] = step_vector[j]
    return u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gauss(x, x0, gamma):\n sigma = gamma / sqrt(2.0)\n \n A = 1/ (sigma * sqrt(2*pi))\n return (A * exp (-0.5 * (x-x0)**2/sigma**2))", "def gauss(x, gamma):\n return 1 / np.sqrt(2*np.pi) / gamma * np.exp(-(x/gamma)**2 / 2)", "def phi_gauss(self,x,i):\n s = 0.1\n return np.exp(-(x-self.mu[i])**2/(2*s))", "def gauss(x, *p):\n A, mu, sigma = p\n\n return A*np.exp(-(x-mu)**2/(2.*sigma**2))", "def gauss(x,p):\n return np.exp((-(x - p[0])**2) / (2 * p[1]**2))", "def gauss(x, *p):\n mu, sigma = p\n return (1 / (sigma * np.sqrt(2 * np.pi)) *\n np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)))", "def make_quad_gauss(lmax,alm):\n return libcurvedsky.bispec.make_quad_gauss(lmax,alm)", "def rand_gauss(n=100, mu=[1, 1], sigma=[0.1, 0.1]):\n d = len(mu)\n res = np.random.randn(n, d)\n return np.array(res * sigma + mu)", "def gauss_seidel(self):\n for i in range(1,self.size[0]-1):\n for j in range(1,self.size[1]-1):\n for k in range(1,self.size[2]-1):\n self.A[(i,j,k)] = ((1/6)*(self.A[(i+1,j,k)] + self.A[(i-1,j,k)] + self.A[(i,j+1,k)] + self.A[(i,j-1,k)] + self.A[(i,j,k+1)] + self.A[(i,j,k-1)] + self.J[(i,j,k)]) - self.A[(i,j,k)])*self.omega + self.A_0[(i,j,k)]", "def gauss(self, X, xm, amp, w):\n return amp * np.exp(-((X - xm) / w) ** 2)", "def fun_gauss_gauss(p,r):\n return p[1] * np.exp(-((r/p[0])**2)) + p[3] * np.exp(-((r/p[2])**2))", "def gauss_sample(mean, covariance):\n\n return None", "def fun_gauss(p,r):\n return p[1] * np.exp(-((r/p[0])**2))", "def trunc_gauss(mu, sigma, bottom, top):\n a = random.gauss(mu, sigma)\n while (bottom <= a <= top) is False:\n a = random.gauss(mu, sigma)\n return int(a)", "def sample_gauss(mean, sd, lower=0, upper=1):\n r = random.gauss(mean, sd)\n while not (lower < r < upper):\n r = random.gauss(mean, sd)\n return r", "def generation(x, g):\n return int(x/g)", "def gaussKern(size):\n size = int(size)\n x, y = np.mgrid[-size:size + 1, -size:size + 1]\n g = np.exp(-(x**2/float(size) + y**2/float(size)))\n return g / g.sum()", "def G(U):\n n = U.shape[0]\n G_U = np.zeros([n,1])\n DELTA_x = float(2*L)/(n-1)\n for i in range(n):\n G_U[i][0] = U[(i+1)%n][0]\n G_U[i][0] -= U[(i-1)%n][0]\n G_U[i][0] /= (2* DELTA_x)\n G_U[i][0] += (float(epsilon) * (U[(i+1)%n][0]- U[(i-1)%n][0]) * (U[(i-1)%n][0]+U[(i+1)%n][0]+ U[i][0])/ (4* DELTA_x))\n G_U[i][0] += (float(epsilon) * (U[(i+2)%n][0]- 2*U[(i+1)%n][0]+ 2*U[(i-1)%n][0]- U[(i-2)%n][0]) / (12*( DELTA_x**3)))\n return G_U", "def batch_integrate_gauss(self, f, mu, sig, viewAs=None):\n x = (self.gh_s\n .unsqueeze(-1)\n .expand(-1, mu.size(0))\n .mul(torch.tensor(2., device=self.deg.device).sqrt())\n .mul(sig.view(1,-1))\n + mu.view(1,-1)\n )\n \n viewAs = viewAs if viewAs is not None else [-1, 1]\n \n return (self.gh_w.view(*viewAs)\n .mul(f(x))\n .mul(torch.tensor(1./math.pi, device=self.deg.device).sqrt())\n .sum(0)\n )", "def gauss_sample(num, stdev):\n sample = np.random.normal(0, stdev, num)\n sample = sample.round().astype(int)\n return sample", "def gauss(x, y, ax, ay, x0, y0, phase):\n g_x = ((ax / sqrt(pi)) ** 0.5\n * exp(-0.5 * ((x - x0) * ax) ** 2))\n g_y = ((ay / sqrt(pi)) ** 0.5\n * exp(-0.5 * ((y - y0) * ay) ** 2))\n\n gxy = np.zeros((len(x),len(y)), dtype=float)\n for i, _gx in enumerate(g_x):\n for j, _gy in enumerate(g_y):\n gxy[i,j] = _gx * _gy \n\n gxy2 = (1.0 / sqrt(1.0+abs(phase))) * np.array([gxy, phase*gxy], dtype=float) \n\n return gxy2", "def gauss(self, k, m, z):\n Rvir = self.U.rVir(m, z)\n result = np.exp(-0.5 * Rvir**2 * k**2)\n return result", "def rand_sample_gauss():\n mean = float(NUM_UNIQUE_VALUES + 1) 
/ 2\n while True:\n r = random.normalvariate(mean, DIST_PARAM)\n value = int(round(r))\n # Rejection sampling to cut off Gaussian to within [1, NUM_UNIQUE_VALUES]\n if 1 <= value <= NUM_UNIQUE_VALUES:\n break\n\n return value # true client value", "def gauss_term_fn(iteration_count, v, z):\n return tf.math.square(z) / 4. / (\n (v + iteration_count - 1) * (v + iteration_count))", "def inv_gauss_int(p):\n #Brute force approach. Limited accuracy for >3sigma\n #find something better \n #DO NOT USE IN LOOPS (very slow)\n if p<0. or p>1.:\n print('Wrong value for p(',p,')!')\n sys.exit()\n step=.00001\n xn=arange(0.,4.+step,step)\n gn=1./sqrt(2.*pi)*exp(-xn**2/2.)\n cgn=add.accumulate(gn)*step\n p=p/2.\n ind=searchsorted(cgn,p)\n return xn[ind]", "def gaussum(xdata,*params):\n\tamp = num.zeros(0)\n\tcen = num.zeros(0)\n\tstdv = num.zeros(0)\n\n\tfor i in range(0, len(params), 3): #This section is just unpacking the parameter array into amps, cens, and stdvs\n\t\tx = params[i]\n\t\tamp = num.append(amp,x)\n\t\ty = params[i+1]\n\t\tcen = num.append(cen,y)\n\t\tz = params[i+2]\n\t\tstdv = num.append(stdv,z)\n\tglobal storage #You may not need storage to be global so think about taking this part out. storage stores the data\n\tstorage = [[0 for x in range(1)] for x in range(len(params)/3)] #from each iteration of the gaussian equation into\n\tfor i in range(len(params)/3):#individual rows. So row one will be the gaussian solutions to the first peak and so on\n\t\tstorage[i] = gaus(xdata,amp[i],cen[i],stdv[i])\n\tstorage = num.asarray(storage)\n\treturn sum(storage)", "def gauss(self, *x, cut=None, rescaled_to_max=False):\n\n mean, std = self._meanStdCut(cut=cut)\n\n if rescaled_to_max: norm = 1\n else: norm = np.sqrt(2*np.pi*(std**2))\n\n gauss = lambda y: (\n np.exp(-((y - mean)**2)/(2*(std**2)))\n /norm)\n\n return np.array(list(map(gauss, x)))", "def gausspix(x, mean=0.0, sigma=1.0):\n edges = np.concatenate((x-0.5, x[-1:]+0.5))\n integrals = gaussint(edges, mean=mean, sigma=sigma)\n return integrals[1:] - integrals[0:-1]", "def gauss_kernel(sigma, sample_rate, duration):\n l = duration * sample_rate\n x = np.arange(-np.floor(l / 2), np.floor(l / 2)) / sample_rate\n y = (1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-(x ** 2 / (2 * sigma ** 2)))\n y /= np.sum(y)\n return y", "def rand_bi_gauss(n1=100, n2=100, mu1=[1, 1], mu2=[-1, -1], sigma1=[0.1, 0.1],\n sigma2=[0.1, 0.1]):\n ex1 = rand_gauss(n1, mu1, sigma1)\n ex2 = rand_gauss(n2, mu2, sigma2)\n res = np.vstack([np.hstack([ex1, 1. * np.ones((n1, 1))]),\n np.hstack([ex2, 2. 
* np.ones((n2, 1))])])\n ind = np.arange(res.shape[0])\n np.random.shuffle(ind)\n return np.array(res[ind, :])", "def gausswin(winlen,alpha=2.5):\r\n lh=(winlen-1)/2+1-np.remainder(winlen,2)\r\n gt=np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n gwin=np.exp(-.5*(alpha*gt/float(lh))**2)\r\n \r\n return gwin", "def gaussian(mu, sigma, start, end):\r\n \r\n val = np.linspace(start, end, 100)\r\n a = 1/(sigma*np.pi)\r\n b = - 0.5 * np.power((mu - val)/sigma, 2)\r\n return a*np.exp(b)", "def gauss_sum_to(n):\n the_sum = n * (n + 1) / 2\n return the_sum", "def gauss_func(x, wid, cen, amp):\n\n return np.exp(-((x-cen)**2.)/(2.*wid**2)) * amp", "def Generate_Ginibre(n):\n G_real = np.random.normal(scale= np.sqrt(1/(2*n)), size=[n,n])\n G_im = np.random.normal(scale= np.sqrt(1/(2*n)), size=[n,n]) * complex(0,1)\n G = G_real + G_im\n return G", "def gauss(x, mu, A, sigma):\n mu, A, sigma = np.atleast_2d(mu), np.atleast_2d(A), np.atleast_2d(sigma)\n val = (A / (sigma * np.sqrt(np.pi * 2)) *\n np.exp(-(x[:, None] - mu)**2 / (2 * sigma**2)))\n return val.sum(axis=-1)", "def Gauss(self, x, height, centre, width, b=0):\n\n return height * np.exp(-(x - centre)**2 / (2 * width**2)) - b", "def gaus(x, A, mu, sigma):\n return A * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2))", "def gauss2(x,a1,c1,w1,a2,c2,w2):\n return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)", "def rand_tri_gauss(n1=100, n2=100, n3=100, mu1=[1, 1],\n mu2=[-1, -1], mu3=[1, -1], sigma1=[0.1, 0.1],\n sigma2=[0.1, 0.1], sigma3=[0.1, 0.1]):\n ex1 = rand_gauss(n1, mu1, sigma1)\n ex2 = rand_gauss(n2, mu2, sigma2)\n ex3 = rand_gauss(n3, mu3, sigma3)\n res = np.vstack([np.hstack([ex1, 1. * np.ones((n1, 1))]),\n np.hstack([ex2, 2. * np.ones((n2, 1))]),\n np.hstack([ex3, 3. * np.ones((n3, 1))])])\n ind = np.arange(res.shape[0])\n np.random.shuffle(ind)\n return np.array(res[ind, :])", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n return g.reshape(-1)", "def gaussseidel_poissoneq(A, x0):\n return 1", "def gaussed_value(self):\n from random import gauss\n return sorted([0, int(gauss(self.value, self.sigma)), \\\n (self.size*8)-1])[1]", "def gauss_rule(iel, elemType, normal_order, element):\n\n #sctr = element[iel, :] # element connectivity\n\n if ((elemType == 'Q4') and (normal_order <8)):\n W, Q = gpw.gauss_pt_wt(normal_order,'GAUSS',2)\n return W, Q", "def _FSpecialGauss(size, sigma):\n radius = size // 2\n offset = 0.0\n start, stop = -radius, radius + 1\n if size % 2 == 0:\n offset = 0.5\n stop -= 1\n x, y = np.mgrid[offset + start:stop, offset + start:stop]\n assert len(x) == size\n g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))\n return g / g.sum()", "def gauss(arrayin, a, ryc=0.0, rxc=0.0):\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n # ryc and rxc are the coordinates of the center of the gaussian\r\n # in fractional unints. 
so ryc = 1 rxc = 1 puts the centre at the \r\n # bottom right and -1 -1 puts the centre at the top left\r\n shifty = int(ryc * ny/2 - 1)\r\n shiftx = int(rxc * nx/2 - 1)\r\n arrayout = np.zeros((ny,nx))\r\n for i in range(0,ny):\r\n for j in range(0,nx):\r\n x = np.exp(-a*((i-ny/2 + 1)**2 + (j-nx/2 + 1)**2))\r\n arrayout[i][j] = x\r\n\r\n if ryc != 0.0 :\r\n arrayout = np.roll(arrayout,shifty,0)\r\n if rxc != 0.0 :\r\n arrayout = np.roll(arrayout,shiftx,1)\r\n\r\n return arrayout", "def test_ln_gauss(self):\n Q = likelihood()\n test_object = test_values_for_likelihood()\n experimental_values = [0.9,2.1,3.2]\n errors = [0.5,0.4,0.3]\n reference_q_value = 0.27347222222222267\n reference_q_derivative = np.array([-3.75833333, -3.75833333])\n Q.add_observable(test_object,experimental_values,errors,scale=1.0)\n q_value, q_derivative = Q.compute_ln_gauss(np.array([1.0,1.0]))\n assert np.isclose(q_value,reference_q_value)\n assert np.all(np.isclose(q_derivative,reference_q_derivative))\n return", "def _fspecial_gauss_1d(self, size, sigma):\n coords = torch.arange(size).to(dtype=torch.float)\n coords -= size // 2\n\n g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))\n g /= g.sum()\n\n return g.unsqueeze(0).unsqueeze(0)", "def ngauss(x,A,c,w):\n sig=w/(2*np.sqrt(2*np.log(2)))\n Amat=np.tile(A,(len(x),1))\n cmat=np.tile(c,(len(x),1))\n sigmat=np.tile(sig,(len(x),1))\n xmat=np.tile(x,(len(A),1)).T\n #for idx in range(len(A)):\n tmp=Amat*np.exp(-((xmat-cmat)**2)/2/sigmat**2)\n # out=out+tmp\n out=tmp.sum(axis=1)\n return out", "def gen_sample ( self , nevents ) :\n if isinstance ( nevents , num_types ) and 0 < nevents :\n return poisson ( nevents )\n elif isinstance ( nevents , VE ) and \\\n ( ( 0 <= nevents.cov2 () and 0 < nevents ) or \n ( 0 < nevents.cov2 () and 0 < nevents + 3 * nevents.error() ) ) :\n for i in range ( 20000 ) :\n n = int ( ve_gauss ( nEvents ) )\n if 0 < n : return n \n else :\n self.error ( \"Can't generate positive number from %s\" % events )\n return\n \n self.error ( \"Can't generate positive number from %s/%s\" % ( events , type ( events ) ) )\n return", "def logGauss(self):\n #firstly initialise an array to store the values\n log = np.zeros([self.num])\n \n #now want to loop through each of the points in the collections\n for i in range(self.num):\n #get the point as an array\n point = self.pick(i)\n #key characteristic of standard normal: can treat as product of independent 1D normals\n log[i] = self.d - np.log(np.sqrt(2 * np.pi)) - 0.5 * np.sum(point**2)\n return log", "def gauss3(x,a1,c1,w1,a2,c2,w2,a3,c3,w3):\n return gaussian(x,a1,c1,w1)+gaussian(x,a2,c2,w2)+gaussian(x,a3,c3,w3)", "def gauss_naive (M, b) -> list:\n dim = len(b)\n\n #Itero sulle Incognite da Trovare\n for i in range(dim):\n\n #Itero sulle righe su cui devo cancellare un elemento\n for j in range(i+1,dim):\n m__j_i = M[j][i] / M[i][i]\n M[j][i] = 0.0\n\n for k in range (i+1,dim):\n M[j][k] = M[j][k] - m__j_i * M[i][k]\n \n b[j] = b[j] - m__j_i * b[i]\n\n return M,b", "def gaussint(x, mean=0.0, sigma=1.0):\n z = (x - mean) / (math.sqrt(2) * sigma)\n return (erf(z) + 1.0) / 2.0", "def _generate_signal(self):\n x = np.arange(self.n, dtype='float')\n resample = np.random.rand(self.n) >= self.proba\n resample[0] = True # randomly initialize first sample\n x[resample] = np.random.randn(np.sum(resample))\n for i in x[~resample]:\n x[int(i)] = x[int(i)-1]\n return x", "def agg_tran_prob_mat(g, step):\n g = my_scale_sim_mat(g)\n g = csc_matrix.toarray(g)\n a_k = g\n a = g\n for k in np.arange(2, step+1):\n a_k = 
np.matmul(a_k, g)\n a = a+a_k/k\n return a", "def make_gabor(x, frequency, phase, sigma):\n return np.cos(frequency*x + phase) * np.exp(-x**2/2./sigma**2)", "def gauss_seidel(coeficientes, semilla, b, i, n):\n suma = 0\n for j in range(n):\n if j != i and coeficientes[j] != 0:\n suma += (coeficientes[j] * semilla[j]) / coeficientes[i]\n return (b / coeficientes[i]) - suma", "def uni_gaussian(X, mu, sigma2):\n p = (1 / sqrt(2 * pi * sigma2))\n p = p * exp(-power(X - mu, 2) / (2 * sigma2))\n\n def prod(x, y):\n return x * y\n p = array([[reduce(prod, el)] for el in p])\n\n return p", "def _gauss_noise(self, shape):\n\n n = np.random.normal(0, 1, shape)\n return self.e*n", "def param_gauss(xdata_, *params_):\n scale_, mean_, cov_ = params_to_scale_mean_cov(params_)\n return scale_ * gaussian(xdata_, mean=mean_, cov=cov_)", "def gaussian(window_size, sigma):\n gauss = torch.Tensor([math.exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])\n return gauss/gauss.sum()", "def unforeseen():\r\n return random.gauss(300., 100.)", "def build_gp(amplitude, length_scale):\n kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)\n return tfd.GaussianProcess(kernel=kernel, index_points=x, observation_noise_variance=0.0)", "def Gaussian(x, mu, sigma, a):\n amplitude = a / ( sigma * np.sqrt(2 * np.pi) )\n u = (x - mu) / sigma\n return amplitude * np.exp( -0.5 * (u**2) )", "def gauss_ker(k, sig):\n\tx = np.linspace(-(k//2), (k//2), k)\n\tgx, gy = np.meshgrid(x, x)\n\tkernel = np.exp(-1*(gx**2 + gy**2)/(2*(sig**2)))\n\treturn kernel", "def normal(mean, std):\n\n return random.gauss(mean, std)", "def legendreGauss (func, deg, a, b, ind, bsp, ind2=0):\n\n\tx, w = np.polynomial.legendre.leggauss(deg)\n\tt = 0.5*(x+1)*(b-a)+ a\n\t\n\tgauss = sum(w + func(t, bsp, ind, ind2))*( 0.5*(b-a))\n\n\treturn gauss", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def get_value(self) -> float:\n return random.gauss(self._mu, self._sigma)", "def gauss_1d(arrayin,a,ryc=0.0,rxc=0.0):\r\n ny = arrayin.shape[0]\r\n # ryc and rxc are the coordinates of the center of the gaussian\r\n # in fractional unints. 
so ryc = 1 rxc = 1 puts the centre at the \r\n # bottom right and -1 -1 puts the centre at the top left\r\n shifty = int(ryc * ny/2 - 1)\r\n arrayout = np.zeros((ny))\r\n for i in range(0,ny):\r\n x = np.exp(-a*((i-ny/2 + 1)**2))\r\n arrayout[i] = x\r\n\r\n if ryc != 0.0 :\r\n arrayout = np.roll(arrayout,shifty,0)\r\n return arrayout", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def data_gauss(N0, N1=None, mu0=arr([0, 0]), mu1=arr([1, 1]), sig0=np.eye(2), sig1=np.eye(2)):\n\tif not N1:\n\t\tN1 = N0\n\n\td1,d2 = twod(mu0).shape[1],twod(mu1).shape[1]\n\tif d1 != d2 or np.any(twod(sig0).shape != arr([d1, d1])) or np.any(twod(sig1).shape != arr([d1, d1])):\n\t\traise ValueError('data_gauss: dimensions should agree')\n\n\tX0 = np.dot(np.random.randn(N0, d1), sqrtm(sig0))\n\tX0 += np.ones((N0,1)) * mu0\n\tY0 = -np.ones(N0)\n\n\tX1 = np.dot(np.random.randn(N1, d1), sqrtm(sig1))\n\tX1 += np.ones((N1,1)) * mu1\n\tY1 = np.ones(N1)\n\n\tX = np.row_stack((X0,X1))\n\tY = np.concatenate((Y0,Y1))\n\n\treturn X,Y", "def lin_gaus(x,m,b,A,mu,sigma):\n return m*x+b + gaus(x,A,mu,sigma)", "def g_rosenbrock(x, a=1, b=100):\n\n g = np.array(\n [\n -2 * a - 4 * b * x[0] * (-x[0] ** 2 + x[1]) + 2 * x[0],\n b * (-2 * x[0] ** 2 + 2 * x[1]),\n ]\n )\n\n return g", "def test_gauss_array():\n x,g = cw04.gen_gaussian_array(-1,1,3)\n desired = np.array([0.24197072451914337, 0.3989422804014327, 0.24197072451914337])\n print(\"Obtained:\",g)\n print(\"Desired:\",desired)\n # Numpy has built-in testing functions to iterate over arrays and compare\n # values up to certain tolerances\n np.testing.assert_almost_equal(g, desired)", "def src_gauss(l, m, sigma_lm, A=1., i=0., pa=0., l0=0., m0=0.):\n l = np.atleast_1d(l)\n m = np.atleast_1d(m)\n sigma_x = sigma_lm\n sigma_y = sigma_lm * np.cos(i)\n a = 0.5 * ((np.cos(pa) / sigma_x)**2. + (np.sin(pa) / sigma_y)**2.)\n b = 0.5 * np.sin(2. * pa) * (sigma_x**-2. - sigma_y**-2.)\n c = 0.5 * ((np.sin(pa) / sigma_x)**2. + (np.cos(pa) / sigma_y)**2.)\n p = a * (l - l0)**2. + b * (l - l0) * (m - m0) + c * (m - m0)**2.\n I = A * np.exp(-p) / (2. 
* pi * sigma_x * sigma_y)\n return I", "def get_gauss_kernel(sigma, samples):\n p = ny.ceil (2*ny.sqrt(2*ny.log(2))*sigma)\n r = ny.linspace(-p, p, samples)\n x,y = ny.meshgrid(r, r)\n b=bivariate_normal(x,y,sigma,sigma)\n A=(1/ny.sum(b))\n B=A*b\n return x,y,B", "def generate_gaussian():\n amp = 10 * numpy.random.chisquare(3)\n width = numpy.random.chisquare(3)\n mean = numpy.random.uniform(-10 + width, 10 - width)\n x = numpy.linspace(-10, 10, 500)\n y = amp * numpy.exp(- (x - mean) ** 2 / width ** 2)\n add_noise(y, 0.1)\n return x, y", "def gauss_seidel(iterate, x, tol=1.0e-9, relaxation=True):\n omega = 1\n k = 10\n p = 1\n for i in range(1,501):\n xold = x.copy()\n x = iterate(x, omega)\n dx = sqrt(dot(x - xold, x - xold))\n if dx < tol:\n return x, i, omega\n if relaxation:\n # Compute of relaxation factor after k+p iterations\n if i == k:\n dx1 = dx\n if i == k + p:\n dx2 = dx\n omega = 2.0 / (1.0 + sqrt(1.0 - (dx2 / dx1)**(1.0 / p)))\n print 'Gauss-Seidel failed to converge'", "def perfect_sq_seq_gen(num):\n for i in range(num):\n yield i ** 2", "def Gauss(self, center_x, center_y, width_x, width_y, height=1.0):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*math.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def guassian_initalisation(num_inputs, num_output, relus=False):\n\n return np.random.normal(0, 0.01, size=(num_inputs, num_outputs))", "def gauss_hermite(dim=1, num_quad_pts=20):\n # sigma_pts, weights = hermgauss(num_quad_pts) # Gauss-Hermite sigma points and weights\n sigma_pts, weights = mvhermgauss(num_quad_pts, dim)\n sigma_pts = np.sqrt(2) * sigma_pts.T\n weights = weights.T * pi ** (-0.5 * dim) # scale weights by 1/√π\n return sigma_pts, weights", "def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)", "def gauss_seidel(L, U, S, x, k):\n\tn = len(x) - 1\n\tm = len(x)//2\n\t\n\t# Leftmost\n\tx[0] = (S[0]/k - U[0, 1]*x[1])/L[0, 0]\n\t# Interior\n\tfor i in range(1, m):\n\t\tx[i] = (S[i]/k - L[i, i - 1]*x[i - 1] - U[i, i + 1]*x[i + 1])/L[i, i]\n\tfor i in range(m, n):\n\t\tx[i] = (S[i]/k - L[i, i - 1]*x[i - 1] - U[i, i + 1]*x[i + 1] -\n\t\t L[i,i-m]*x[i-m]) / L[i,i]\n\t# Rightmost\n\tx[n] = (S[n]/k - L[n, n - 1]*x[n - 1] - L[n, n - m]*x[n - m])/L[n, n]\n\treturn x", "def onedgauss(x,H,A,dx,w):\n #H,A,dx,w = params\n return H+A*np.exp(-(x-dx)**2/(2*w**2))", "def gauss_score(_r, _mu=0, _sigma=512):\n return np.exp(-(_r - _mu)**2 / (2*_sigma**2)) # / 2", "def test_gauss(gaussian):\n x_gauss, x = gaussian\n\n assert len(x) == 25\n assert x_gauss[0] == x_gauss[-1]", "def test_gauss_list():\n x,g = cw04.gen_gaussian_list(-1,1,3)\n desired = [0.24197072451914337, 0.3989422804014327, 0.24197072451914337]\n print(\"Obtained:\",g)\n print(\"Desired:\",desired)\n # For comparing floating point values, nose has useful helper functions\n # to ensure they are equal up to a numerical precision tolerance\n nose.tools.assert_almost_equal(g, desired)", "def _f_special_gauss(size, sigma):\n radius = size // 2\n offset = 0.0\n start, stop = -radius, radius + 1\n if size % 2 == 0:\n offset = 0.5\n stop -= 1\n x, y = np.mgrid[offset + start:stop, offset + start:stop]\n assert len(x) == size\n g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))\n g /= g.sum()\n return tf.constant(g, dtype=tf.float32)", "def generate_S_u(t):\n t_binning = unique_binning(t)\n return 
generate_S_from_binning(t_binning)", "def fun_exp_p_gauss(p,r):\n return p[1] * np.exp(-((r**2/p[0]))) + p[3] * np.exp(-((np.abs(r)/p[2])))", "def g(self, X):\n\n return (X[0])**2 - 2*X[0] + X[1]**3 - 2", "def agauss(self, X, xm, amp, w, a):\n # w(x) = 2 * w / (1 + np.exp(a * (X - xm)))\n return amp * np.exp(-((X - xm) / (2 * w / (1 + np.exp(a * (X - xm))))) ** 2)", "def g(self, x):\n return x * (1 - x)", "def propose(x, jump = 0.1):\n\treturn (x[0] + random.gauss(0, jump), x[1] + random.gauss(0, jump))", "def gauss_quadrature(ab, N):\n\n from numpy.linalg import eigh\n\n n = ab.shape[0]\n if n+1 < N:\n raise IndexError('Require N+1 recurrence coefficients for an N-point rule.')\n\n J = np.diag(ab[1:N,1], k=1) + np.diag(ab[:N,0],k=0) + np.diag(ab[1:N,1], k=-1)\n lamb,v = eigh(J)\n\n return lamb, v[0,:]**2", "def gaussian_elimination_special_case(b):\n n = len(b)\n # init new (prime) arrays\n beta_prime = np.empty(n)\n beta_prime[0] = 2\n\n b_prime = np.empty(n)\n b_prime[0] = b[0]\n\n v = np.empty(n)\n i_array = np.arange(n)\n beta_prime = (i_array+2) / (i_array+1)\n\n for i in range(1,n):\n b_prime[i] = b[i] + (b_prime[i-1] / beta_prime[i-1])\n\n v[-1] = b_prime[-1] / beta_prime[-1]\n\n for i in range(n-2, -1, -1):\n v[i] = (b_prime[i] + v[i+1])/ beta_prime[i]\n\n return v", "def klucb_gauss(x, d, sig2=1., precision=0.):\n return x + sqrt(2*sig2*d)", "def gaussian(height, center_x, center_y, width_x, width_y, rotation):\n width_x = float(width_x)\n width_y = float(width_y)\n\n rotation = np.deg2rad(rotation)\n center_x = center_x * np.cos(rotation) - center_y * np.sin(rotation)\n center_y = center_x * np.sin(rotation) + center_y * np.cos(rotation)\n\n def rotgauss(x,y):\n xp = x * np.cos(rotation) - y * np.sin(rotation)\n yp = x * np.sin(rotation) + y * np.cos(rotation)\n g = height*np.exp(\n -(((center_x-xp)/width_x)**2+\n ((center_y-yp)/width_y)**2)/2.)\n return g\n return rotgauss" ]
[ "0.6865911", "0.6794377", "0.6759611", "0.6687887", "0.6683238", "0.6680485", "0.66098064", "0.65058136", "0.6487504", "0.6293768", "0.62710446", "0.62198", "0.62048715", "0.6181345", "0.61764723", "0.6148736", "0.6144734", "0.6125924", "0.60800266", "0.607092", "0.606629", "0.60621977", "0.60471153", "0.59592617", "0.5952665", "0.59514683", "0.5928697", "0.5904709", "0.5881537", "0.58810157", "0.58742416", "0.5863308", "0.5851102", "0.5848479", "0.5842684", "0.58381075", "0.5837058", "0.5831333", "0.58218294", "0.58126885", "0.579448", "0.5769377", "0.57681227", "0.5763325", "0.5755366", "0.57475615", "0.5724698", "0.5721845", "0.5714012", "0.5669324", "0.5667487", "0.56672907", "0.5645073", "0.5643941", "0.56329846", "0.56298614", "0.55987185", "0.55802065", "0.55672", "0.55656135", "0.55651623", "0.5563112", "0.5555294", "0.5546481", "0.55330426", "0.5525891", "0.55201757", "0.5507933", "0.5507929", "0.55017376", "0.55014783", "0.5500647", "0.5500153", "0.5500134", "0.5479058", "0.5478269", "0.54718477", "0.54525256", "0.54509187", "0.5447352", "0.5440251", "0.54398984", "0.54349065", "0.54328954", "0.542334", "0.5416118", "0.5414383", "0.541156", "0.541095", "0.54082817", "0.540474", "0.5401241", "0.5392969", "0.5387401", "0.5382206", "0.5376857", "0.5373057", "0.53717947", "0.5371482", "0.53701335", "0.53696" ]
0.0
-1
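Note: the positive document of this row is a single method from a step-signal generator class whose constructor does not appear here. Below is a minimal, self-contained sketch of how such a generator might be assembled and called; the wrapper class name, the attribute defaults, and the call at the bottom are illustrative assumptions, not part of the dataset.

import numpy as np
from random import gauss

class GaussStepSequence:
    # Hypothetical wrapper: it defines only the attributes that out() reads.
    def __init__(self, mu=1.0, sigma=0.3, n_step=5, step_time=10, ss=None):
        self.mu = mu                # mean of the Gaussian step amplitudes
        self.sigma = sigma          # standard deviation of the Gaussian step amplitudes
        self.n_step = n_step        # number of distinct step levels to draw
        self.step_time = step_time  # how many time units each level is held
        self.ss = ss                # optional steady-state value used before the first step

    def out(self, t, dim=(None, None)):
        # Draw |N(mu, sigma)| amplitudes, one per step, rounded to one decimal place
        step_vector = np.abs([round(gauss(self.mu, self.sigma), 1) for _ in range(self.n_step)])
        u = np.zeros(shape=dim)
        j = 0
        for i in range(len(t)):
            # Advance to the next level at every multiple of step_time, except at the last sample
            if t[i] % self.step_time == 0 and t[i] != 0 and j + 1 != len(step_vector) and i != len(t) - 1:
                j += 1
            if self.ss is not None and j == 0:
                u[i, :] = self.ss
            else:
                u[i, :] = step_vector[j]
        return u

t = np.arange(0, 50)                     # 50 time samples
seq = GaussStepSequence(step_time=10, n_step=5)
u = seq.out(t, dim=(len(t), 1))          # piecewise-constant signal; a new random level every 10 samples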
Settings for a random step sequence
def __init__(self, step_time, saw_time, step_interval=None, n_step=None, ss=None):
    self.ss = ss
    self.n_step = n_step
    self.interval = step_interval
    self.step_time = step_time
    self.saw_time = saw_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def take_step(self):\n choices_of_steps = [(0,1), (1,0), (0,-1), (-1,0)]\n return random.choices(choices_of_steps)[0]", "def _setup_next_sequence(cls):\n return 0", "def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5", "def setseq():\n\n if seqRnd[0]:\n r = getrndseq(seqRndWeight)\n if seqRnd[0] == 1:\n gbl.seqCount = r\n r = -1\n else:\n r = -1\n\n return ( r, seqRnd[1:] )", "def _setVals(self, step=0):\n self.step = step", "def evaluate_config(rnd: int):\n val_steps = 5 if rnd < 4 else 10\n return {\"val_steps\": val_steps}", "def init_random_state(self):\n self.current_state = self.rng.uniform(size=[1, self.num_spins])\n self.current_state = np.where(self.current_state < 0.5, -1.0, 1.0)", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def test_case_generate(self):\n\n # initialization\n state = np.random.choice(self.init_states)\n model = rm.randint(0, self.model_num - 1)\n duration = np.random.choice(self.step_values)\n temp = rm.randint(self.min_temp, self.max_temp)\n\n self.states = [[model, duration, temp]]\n self.time = duration\n\n while self.time < self.max_time:\n if state == \"inc_tmp\":\n change = np.random.choice(\n self.transitionName[0], p=self.transitionMatrix[0]\n ) # choose the next state\n if change == \"S1S1\": # stay in the same state\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n diff = (\n self.max_time - self.time\n ) # this is for ensuring the maximum duration is not exceeded\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n elif change == \"S1S2\": # change from increase to decrease\n temp = self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"dec_tmp\"\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n else:\n print(\"Error\")\n\n elif state == \"dec_tmp\":\n change = np.random.choice(\n self.transitionName[1], p=self.transitionMatrix[1]\n )\n if change == \"S2S1\":\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"inc_tmp\"\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n\n self.time += duration\n self.states.append([model, duration, 
temp])\n\n elif change == \"S2S2\":\n temp = self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n else:\n print(\"Error\")\n pass\n else:\n print(\"Error\")\n\n return self.states_to_dict()", "def next_state(self):\n \n self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])", "def set_rand_seed(self, idx):\n random.seed(self.base_seed + self.epoch + idx // 2)", "def step(self):\n\n self.agents[random.randint(self.get_agent_count())].step()\n self.steps += 1\n self.time += 1", "def setUp(self) -> None:\n self.random = np.random.RandomState(seed=42)", "def sequence_params(self):", "def initialize_random_number_generator(self,question_type):\n\t\tself.generator.seed(self.generate_index(self.magic, self.level, self.problem_id, question_type))", "def setRandom(self):\n pass # define each VarElement family", "def swait_setup_random_number(swait, **kw):\n swait.reset()\n swait.scan.put(\"Passive\")\n swait.calc.put(\"RNDM\")\n swait.scan.put(\".1 second\")\n swait.desc.put(\"uniform random numbers\")", "def nextPhase(self):\n\n if self.sensorType == SENSOR_TYPES[\"TEMP\"]:\n self.value = self.randGen.choice(TEMP_RANGE)\n else:\n self.value = self.randGen.randint(0, 100)", "def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)", "def seed():", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def set_first_machine_time_step(self, first_machine_time_step):", "def step_particles(particle,self):\n\n self.models[particle].step()\n\n self.states[particle] = (self.models[particle].agents2state()\n\n + np.random.normal(0, self.particle_std**2, \n\n size=self.states[particle].shape))\n\n self.models[particle].state2agents(self.states[particle])\n\n return self.models[particle], self.states[particle]", "def set_IStep(self, istep = [0], istep_sigma = [0], tstep = 5, tdur = 1e6, give_freq = True):\n if self.do_run:\n #for m in self.ic_steps:\n # m.destroy()\n # del m \n #del self.ic_steps\n \n #self.ic_steps = []\n \n istep = list(istep)\n neg = False\n \n for n in range(self.n_celltypes):\n \n if istep[n] < 0: \n neg = True\n istep[n] = abs(istep[n]) # make positive again\n \n if istep[n] != 0:\n if give_freq is True:\n a = np.array([istep[n]])\n iin = self.get_i(a, n)[0]\n if self.id == 0: print \"celltype: \", n, \" istep: \", istep[n], \"Hz => \", iin, \" nA\"\n istep[n] = iin \n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n np.random.seed(gid*30)\n \n if self.i_holdrs == []:\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n], istep[n]*istep_sigma[n], 1).clip(min=0)\n else: # same ihold for all cells!\n istep_r = istep[n]\n \n else: # ihold has been set!\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n]-self.i_holds[n], (istep[n]-self.i_holds[n])*istep_sigma[n], 1).clip(min=0) # delta now! 
put on top of hold!\n else: # same ihold for all cells!\n istep_r = istep[n]-self.i_holds[n] # delta now! put on top of hold!\n \n if neg:\n istep_r = -1*istep_r\n \n if istep[n] == 0:\n istep_r = -1*self.i_holdrs[n][i] \n \n #print 'is:' + str(istep_r) + 'was:' + str(self.i_holdrs[n][i])\n \n if istep_r != 0: \n # step current\n ic_step = h.IClamp(self.cells[n][i].soma(0.5))\n ic_step.delay = tstep/ms\n ic_step.dur = tdur/ms\n ic_step.amp = istep_r/nA\n self.ic_steps.append(ic_step)\n \n \n if self.id == 0: print \"set_IStep finished. istep: \", istep, \", istep_sigma: \", istep_sigma", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def setUp(self):\n # record the randomness used in case the test fails:\n self.rand_seed = int(time.time())\n sr.seed(self.rand_seed)\n print(\"seed for this test: \" + str(self.rand_seed))", "def totem_random():\n random_head()\n random_head()\n random_head()", "def simulate(self):\n self.round += 1", "def set_seed(self, seed: int):\n self.rsimulator.set_seed(seed)\n # Maybe call new game here?", "def NewRndSeed(ss):\n ss.RndSeed = int(datetime.now(timezone.utc).timestamp())", "def setUp(self):\n # record the randomness used in case the test fails:\n rand_seed = int(time.time())\n sr.seed(rand_seed)\n print(\"seed for this test: \" + str(rand_seed))", "def __random_movement(self):\n\t\tself.__steps += 1 \t\t# Increment after every frame\n\t\t# When __steps greater than threshold reverse the direction\n\t\t# and set threshold to a new random value\n\t\tif self.__steps >= self.__threshold_steps:\t\n\t\t\tif self.direction == 'RIGHT':\n\t\t\t\tself.move_left()\n\t\t\t\tself.direction = 'LEFT'\n\t\t\telse:\n\t\t\t\tself.move_right()\n\t\t\t\tself.direction = 'RIGHT'\n\t\t\tself.__threshold_steps = random.randint(25,50)\n\t\t\tself.__steps = 0\n\t\t# Confines the Donkeys movement to within the boundary \n\t\tself.__check_boundary()", "def rand(self):\n raise NotImplementedError", "def step(self, state):", "def random(self, n=1):\n # self.num_generated += n", "def random_params_gen(self) -> TransformParams:\n while True:\n do_hor_flip = self.horizontal_flip and (np.random.random() < 0.5)\n do_vert_flip = self.vertical_flip and (np.random.random() < 0.5)\n\n yield TransformParams(do_hor_flip=do_hor_flip,\n do_vert_flip=do_vert_flip)", "def SetRandomSeed(seed):\n global option\n option['random_seed'] = seed", "def __init__(self, random_state):\n self.random_state = random_state\n self.random_generator = RandomState(self.random_state)", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def start_new_chain(self, random_seed=None):\n if random_seed is not None:\n np.random.seed(random_seed)\n\n if self.n_burning > 0:\n parameters = self.sample_parameters(float(self.n_burning) / (self.thinning + 1))\n else:\n parameters = [self.samples_parameters[-1]]\n\n self.samples_parameters = []\n self.samples_parameters.append(parameters[-1])\n self.start_point_sampler = parameters[-1]", "def setup_method(cls):\n seed()", "def corun(self, step:int=0):\n if step > 0:\n _range = range(self.current_step, self.current_step + step + 1)\n else: # run forever\n _range = itertools.count(self.current_step)\n for step_num in _range:\n self.config_template = (yield self.step()) or self.config_template", "def randomize(self):\n \n spins = [np.random.random() > 0.5 for x in range(self.size)]\n self.spins_initial = bitarray.bitarray(spins)", "def step(self, steps):\n 
self._simulate(endStep=self.currentStep+steps)", "def _sample_steps(self):\n mixture_size = self.parameters['fixed_mixture_size']\n if not self.is_correlated_mixture and mixture_size is None:\n return self.get_steps('monte_carlo')\n else:\n return self.get_steps('metropolis')", "def _random_warmup(self, num_steps):\n new_frame = self.env.reset()\n reward = 0.0\n action = 0\n done = False\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n for i in range(num_steps):\n \n action = np.random.randint(self.num_actions)\n new_frame, reward, done, _ = self.env.step(action)\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n if done:\n new_frame = self.env.reset()\n self.memory.add_experience(0, 0.0, new_frame, 1, False)\n\n self.memory.add_experience(0, 0.0, new_frame, 1, True)", "def rand(self):\n self.state = (self.a * self.state + self.c)\n return self.state", "def randomize_position(self, w, steps = 3):\n \n #self.red.set_power(0)\n \n for k in range(steps):\n for idx,waveplate in enumerate(w):\n print '* Randomizing %s waveplate (step %d) ...'%(waveplate, k)\n self.rotator.quick_scan(np.random.uniform(low = -20000, high = 20000) ,getattr(self,'_'+waveplate+'_channel'))", "def update_random_state(self):\n self.random_state = RandomState()", "def setSeqRnd(ln):\n\n global seqRnd\n\n emsg = \"use [ON, OFF or TrackList ]\"\n if not ln:\n error(\"SeqRnd:\" + emsg)\n\n a=ln[0].upper()\n\n if a in (\"ON\", \"1\") and len(ln) == 1:\n seqRnd = [1]\n\n elif a in (\"OFF\", \"0\") and len(ln) == 1:\n seqRnd = [0]\n\n else:\n seqRnd=[2]\n for a in ln:\n a = a.upper()\n if not a in gbl.tnames:\n error(\"SeqRnd: Track '%s' does not exist, %s\" % (a, emsg))\n if a in seqRnd:\n error(\"SeqRnd: Duplicate track '%s' specified, %s\" % (a, emsg))\n seqRnd.append(a)\n\n if gbl.debug:\n print \"SeqRnd:\",\n if seqRnd[0] == 2:\n for a in seqRnd[1:]:\n print a,\n print\n elif seqRnd[0] == 1:\n print \"On\"\n else:\n print \"Off\"", "def set_seed(self,seed):\r\n if seed is None:\r\n warnings.warn(\r\n \"Initializing player with seed from Axelrod module random number generator. 
\"\r\n \"Results may not be seed reproducible.\")\r\n self._seed = _module_random.random_seed_int()\r\n else:\r\n self._seed = seed\r\n self._random = RandomGenerator(seed=self._seed)\r\n self.base._random = self._random\r\n self.trust._random = self._random\r\n self.conviction._random = self._random\r\n \r\n self.generator = torch.Generator()\r\n self.generator.manual_seed(int(seed))", "def step(self, step=None):\n pass", "def start_random_sequence(self) -> int:\n return random.randint(0, TWO_BYTES)", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def configure_step(self):\n pass", "def setRandDirection(self):\n phi = 2*math.pi*random.random()\n u = 2*random.random() - 1\n v = math.sqrt(1-u*u)*math.cos(phi)\n w = math.sqrt(1-u*u)*math.sin(phi)\n self.direction = (u,v,w)", "def set_states(self, states):\n if states is None:\n logging.getLogger('eval').warning(\n 'could not reproduce state, setting unreproducable random seed for all random states')\n self.randomstate.seed(np.random.randint(0, 1000000))\n if hasattr(self, 'random_mask_state'):\n self.random_mask_state.seed(np.random.randint(0, 100000))\n if hasattr(self, 'deformrandomstate'):\n self.deformrandomstate.seed(np.random.randint(0, 100000))\n else:\n if hasattr(self, 'random_mask_state') and 'random_mask_state' in states:\n self.random_mask_state.set_state(states['random_mask_state'])\n if hasattr(self, 'deformrandomstate') and 'deformrandomstate' in states:\n self.deformrandomstate.set_state(states['deformrandomstate'])\n self.randomstate.set_state(states['randomstate'])", "def configure_step(self):\n\n pass", "def setSeqRndWeight(ln):\n\n global seqRndWeight\n\n seqRndWeight = getweights(ln, \"SeqRndWeight\")", "def setRandomSensitivitySpeed(self) -> None:\n\n self.sensitivity = randint(20, 70)\n self.speed = randint(7, 12)", "def __dice_generator(self):\n self.current_dice = np.random.randint(1, 6 + 1)", "def seed(seed: int):\n # all sampling is actually happening in the move_cube module\n move_cube.seed(seed)", "def SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def _rgbSequenceInit(self):\n ## send all of this to sequence acq\n if not self.nbFrames:\n self.nbFrames = int(self.duration/self.cycleTime)+1 ## Determine number of frames. 
(+1) because int round at the lower int\n self.ledSeq = [0]*self.rgbLedRatio[0]+[1]*self.rgbLedRatio[1]+[2]*self.rgbLedRatio[2] #Sequence of LED lighting in function of the ratio\n #RED = 0\n #GREEN = 1\n #BLUE = 2\n print('LED sequence : ', self.ledSeq)\n self.ledList = self.ledSeq*(int(self.nbFrames/(len(self.ledSeq)))+1) ## schedule LED lighting\n #NB : no return needed because each ledList and nbFrames are instance attribute", "def reinitialize(self, random_state):\n pass", "def _set_seed(self) -> None:\r\n random.seed(self.seed)\r\n np.random.seed(self.seed)", "def trial_config(self, prev_config, cov_config=1e-2):\r\n return prev_config + np.random.normal(0, cov_config, len(prev_config))", "def randomize_value(self) -> None:", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def gen_random_walk(self,n_step=100):\n # Warning about the small number of steps\n if n_step < 30:\n print(\"WARNING! The number of steps is small. It may not generate a good stochastic process sequence!\")\n \n w = np.ones(n_step)*self.x0\n \n for i in range(1,n_step):\n # Sampling from the Normal distribution with probability 1/2\n yi = np.random.choice([1,-1])\n # Weiner process\n w[i] = w[i-1]+(yi/np.sqrt(n_step))\n \n return w", "def set_seed(self, seed=None):\n super().set_seed(seed=seed)\n for t in self.policy_list:\n t.set_seed(self._random.random_seed_int())", "def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True", "def reset(self):\n \n self.steps = 0\n if self.episode == 0:\n self.ins = random.uniform(self.mins.values[:4],self.maxes.values[:4])\n #get the corresponding outputs:\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n self.starts = np.append(self.ins, outs)\n\n else:\n self.starts = self.state[:7] #previous episode's end state\n\n #get goals from random inputs:\n viable = False\n while viable == False:\n self.ins = random.uniform((self.mins.values[:4]+(self.mins.values[:4]*self.minmaxbuffer)),self.maxes.values[:4]-(self.maxes.values[:4]*self.minmaxbuffer))\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n \n # Check if viable:\n viable = self.test_viable(outs)\n\n self.goals = outs\n\n # These are your current inputs:\n self.ins = self.starts[:4]\n # State carries the starting points and the goals.\n self.state = np.append(self.starts,self.goals)\n\n #Track episodes and total reward.\n self.episode += 1\n self.tot_rew = 0\n\n return (self.state)", "def step(self, **kwargs):\n pass", "def reset(self, setup=False):\n self._done = False\n self._nbSteps = 0\n\n x = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n x = random.randint(0, self._width - 1)\n elif (self.startPosX == 'random' and not setup):\n x = self._initState[0]\n elif self.startPosX == 'center':\n x = self._width - 1\n else:\n x = int(self.startPosX)\n\n y = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n y = random.randint(0, self._height - 1)\n elif (self.startPosY == 'random' and not setup):\n y = self._initState[1]\n elif self.startPosX == 'center':\n y = self._height - 1\n else:\n y = int(self.startPosX)\n\n self._currentPos = (x, y)\n 
self._trajectory = [(x, y)]\n\n return (x, y)", "def test_random_movement(\n size: Union[int, tuple], num_berries: int, delay_seconds: int, number_steps: int\n) -> None:\n game = Game(\n size,\n [0, 0],\n -1,\n 5,\n -5,\n 10,\n num_berries,\n berry_movement_probabilities=[0.5] * num_berries,\n )\n print(f\"Starting board:\\n{game.get_board()}\")\n done = False\n i = 1\n while not done and i < number_steps:\n print(f\"Action {i}\")\n time.sleep(delay_seconds)\n _, reward, done = game.step(random.choice(MOVEMENTS))\n print(f\"Board:\\n{game.get_board()}\")\n print(f\"Reward: {reward}\")\n i += 1", "def setup_steps(self):\n step1 = ground_step.Ground(5745, 495, 40, 44)\n step2 = ground_step.Ground(5788, 452, 40, 44)\n step3 = ground_step.Ground(5831, 409, 40, 44)\n step4 = ground_step.Ground(5874, 366, 40, 176)\n\n step5 = ground_step.Ground(6001, 366, 40, 176)\n step6 = ground_step.Ground(6044, 408, 40, 40)\n step7 = ground_step.Ground(6087, 452, 40, 40)\n step8 = ground_step.Ground(6130, 495, 40, 40)\n\n step9 = ground_step.Ground(6345, 495, 40, 40)\n step10 = ground_step.Ground(6388, 452, 40, 40)\n step11 = ground_step.Ground(6431, 409, 40, 40)\n step12 = ground_step.Ground(6474, 366, 40, 40)\n step13 = ground_step.Ground(6517, 366, 40, 176)\n\n step14 = ground_step.Ground(6644, 366, 40, 176)\n step15 = ground_step.Ground(6687, 408, 40, 40)\n step16 = ground_step.Ground(6728, 452, 40, 40)\n step17 = ground_step.Ground(6771, 495, 40, 40)\n\n step18 = ground_step.Ground(7760, 495, 40, 40)\n step19 = ground_step.Ground(7803, 452, 40, 40)\n step20 = ground_step.Ground(7845, 409, 40, 40)\n step21 = ground_step.Ground(7888, 366, 40, 40)\n step22 = ground_step.Ground(7931, 323, 40, 40)\n step23 = ground_step.Ground(7974, 280, 40, 40)\n step24 = ground_step.Ground(8017, 237, 40, 40)\n step25 = ground_step.Ground(8060, 194, 40, 40)\n step26 = ground_step.Ground(8103, 194, 40, 360)\n\n step27 = ground_step.Ground(8488, 495, 40, 40)\n\n self.step_group = pygame.sprite.Group(step1, step2,\n step3, step4,\n step5, step6,\n step7, step8,\n step9, step10,\n step11, step12,\n step13, step14,\n step15, step16,\n step17, step18,\n step19, step20,\n step21, step22,\n step23, step24,\n step25, step26,\n step27)", "def __init__(self, env, random_seed=None):\n self.env = env \n self.RandomState = np.random.RandomState(random_seed)", "def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"", "def Gen_RandLine(length, step_max, dims=2):\n \n lineData = np.empty((dims, length))\n lineData[:, 0] = np.random.rand(dims)\n for index in range(1, length):\n step = ((np.random.rand(dims) - 0.5)*step_max)\n lineData[:, index] = lineData[:, index - 1] + step\n return lineData", "def setUp(self):\n self.t = True\n self.f = False\n self.value = 25", "def __init__(self):\n \n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToPlay'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def setUp(self):\n shape = RNG.integers(5, 50)\n periods = self.periods = RNG.normal() * 3\n freq = periods / shape\n amp = RNG.normal()\n 
offset = RNG.normal()\n phase = (RNG.normal() - 1 / 2) * 5 / 3 * np.pi\n p_gt = self.p_gt = (amp, freq, phase, offset)\n x = self.x = np.arange(shape)\n self.data = sine(x, *p_gt)", "def set_seed(self, seed: int):\n self.__sim.seed(seed)", "def __init__(self, allow_step_back=False):\n self.allow_step_back = allow_step_back\n self.np_random = np.random.RandomState()\n \"\"\" No big/small blind\n # Some configarations of the game\n # These arguments are fixed in Leduc Hold'em Game\n # Raise amount and allowed times\n self.raise_amount = 2\n self.allowed_raise_num = 2\n self.num_players = 2\n \"\"\"\n # Some configarations of the game\n # These arguments can be specified for creating new games\n\n # Small blind and big blind\n self.small_blind = 1\n self.big_blind = 2 * self.small_blind\n\n # Raise amount and allowed times\n self.raise_amount = self.big_blind\n self.allowed_raise_num = 2\n\n self.num_players = 2", "def test_init(self):\n global_step = tf.get_variable(\"global_step\", [], tf.int32,\\\n initializer=tf.constant_initializer(0, dtype=tf.int32),\n trainable=False)\n lstm_pi = LSTMPolicy((80,80,3), 4,global_step)", "def __init__(self, rate):\n super(RandomWander, self).__init__()\n self.iteration = 0\n self.rate = rate\n self.speed = 0\n self.heading = 0", "def step(self): \n self.reset_parameters()\n\n if np.random.uniform(0, 1) < self.model.churn_prob: self.exit_triggered = True \n if self.exit_triggered:\n self.exit()\n else:\n self.register_deposit(self.deposit_intent)\n self.register_contribution(self.contribution_intent)\n self.register_sponsorship(self.sponsor_intent)\n self.register_euro_exchange(self.euro_exchange_intent)\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)", "def rand(self):\n return self.State.rand()", "def test_init():\n rng = NonRandom()\n seed = 5\n rng.setSeed(seed)\n wheel = Wheel(rng)\n assert len(wheel.bins) == 38\n assert wheel.rng.value == seed\n assert wheel.rng.choice(range(0, 38)) == range(\n 0, 38)[wheel.rng.value] # == seed", "def generator(self, random, args):\r\n raise NotImplementedError", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def play_random(env, steps):\n try:\n done = True\n progress = tqdm(range(steps))\n for _ in progress:\n if done:\n _ = env.reset()\n action = env.action_space.sample()\n _, reward, done, info = env.step(action)\n progress.set_postfix(reward=reward, info=info)\n env.render()\n except KeyboardInterrupt:\n pass\n # close the environment\n env.close()", "def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)", "def __init__(self, initial_value, n_values, schedule):\n self.step = 0.\n self.initial_value = initial_value\n self.nvalues = n_values\n self.schedule = SCHEDULES[schedule]", "def get_next_sample(self):" ]
[ "0.6600735", "0.6600735", "0.6426488", "0.63580966", "0.63181347", "0.6233752", "0.6195526", "0.61665463", "0.6133349", "0.6077554", "0.60269153", "0.5976023", "0.595699", "0.59379584", "0.59343755", "0.59263146", "0.5924577", "0.59218735", "0.5883561", "0.5867385", "0.58269936", "0.5821847", "0.5802779", "0.5774282", "0.57555526", "0.57509786", "0.574638", "0.57450294", "0.5740118", "0.57344127", "0.5717241", "0.57172257", "0.56846654", "0.56758934", "0.567149", "0.56553286", "0.56395864", "0.5639068", "0.5637476", "0.56370026", "0.563278", "0.56323016", "0.5628614", "0.5627348", "0.56260455", "0.5609896", "0.5604609", "0.56016976", "0.5601342", "0.55982894", "0.5597746", "0.55915093", "0.55913436", "0.55911785", "0.5580702", "0.557787", "0.55691504", "0.55665696", "0.55651593", "0.55644536", "0.55555946", "0.5553894", "0.55499774", "0.5549558", "0.55430317", "0.55393404", "0.5526295", "0.5524297", "0.5523442", "0.5521429", "0.55153036", "0.5514629", "0.551019", "0.5507653", "0.5503758", "0.54958874", "0.54932344", "0.5486296", "0.54810774", "0.54761463", "0.5475091", "0.54738265", "0.54711276", "0.5468878", "0.5465642", "0.54418707", "0.5437692", "0.5435852", "0.54288685", "0.54225165", "0.54206306", "0.5419599", "0.54185426", "0.54017556", "0.54011166", "0.53993434", "0.53983533", "0.5395225", "0.53945327", "0.5394164", "0.5389844" ]
0.0
-1
Generate a random sequence
def out(self, t: any, dim=(None, None)) -> any: lB = self.interval[0] # Lower Boundary uB = self.interval[1] # Upper Boundary # Initialize random step vector each sampling period using comprehensive list. step_vector = [round(uniform(lB, uB), 1) for _ in range(self.n_step)] step_vector[0] = self.ss # keep the steady state value as first u = np.zeros(shape=dim) # Initialize step control input array u. j = 0 ramp_Step = self.saw_time count = 1 for i in range(len(t)): # Excluding the last point if t[i] % self.step_time == 0 and t[i] != 0 and j+1 != len(step_vector) and i != len(t)-1: # No last step j += 1 count = 1 if self.ss is not None and j == 0: u[i, :] = self.ss else: if count != ramp_Step: u[i, :] = (step_vector[j] - step_vector[j-1]) * (count / ramp_Step) + step_vector[j-1] count += 1 else: u[i, :] = step_vector[j] return u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_random_sequence():\n\n seq = []\n [seq.append(np.random.choice(cs.DNA_BASES)) for _ in range(cs.LENGTH)]\n\n return seq", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def start_random_sequence(self) -> int:\n return random.randint(0, TWO_BYTES)", "def random(self, seq=None):\n if seq is None:\n seq = self.seq\n seq_list = list(seq)\n random.shuffle(seq_list)\n return \"\".join(seq_list)", "def _generate_random_number_for_each_sequence(total, sequence_number):\r\n current_total = 0\r\n r = []\r\n for n in range(sequence_number-1, 0, -1):\r\n current = random.randint(1, total - current_total - n)\r\n current_total += current\r\n r.append(current)\r\n r.append(total - sum(r))\r\n random.shuffle(r)\r\n\r\n return r", "def choice(seq):\r\n i = int(random() * len(seq))\r\n return seq[i]", "def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)", "def generate() -> int:\n return randint(0, 1000000000)", "def get_random_sequence(length):\n sequence = ''\n for i in range(length):\n random_letter = format(random.randrange(9), 'x')\n sequence = '{}{}'.format(sequence, random_letter)\n return sequence", "def generate_sequence(self, n=100, initial_state=None):\n\n if initial_state is None:\n if self.pad:\n sequence = [START_OF_SEQ] * self.order\n else:\n sequence = list(random.choice(self.records.keys()))\n else:\n sequence = initial_state[:]\n\n for i in range(n):\n current_state = tuple(sequence[-self.order:])\n next_token = self.sample(current_state)\n sequence.append(next_token)\n\n if next_token == END_OF_SEQ:\n return sequence\n\n return sequence", "def random_sample(seq):\r\n if len(seq) = 0:\r\n return None\r\n return sample(seq, randint(1, len(seq)/2))", "def rngnext():\n out = []\n # random\n state = random.getstate()\n out.append(f\"r={random.random():0.4f}\")\n random.setstate(state)\n\n # numpy\n state = np.random.get_state()\n out.append(f\"n={np.random.random():0.4f}\")\n np.random.set_state(state)\n\n # torch\n state = torch.random.get_rng_state()\n out.append(f\"t={torch.rand(1)[0]:0.4f}\")\n torch.random.set_rng_state(state)\n\n # cuda\n if torch.cuda.is_available():\n state = torch.cuda.get_rng_state()\n # note there is no function for generating a random in cuda but this may work?\n out.append(f\"c={state.float().std()%1:0.4f} {torch.backends.cudnn.deterministic}\")\n\n return out", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def generate_raiz():\n\treturn os.urandom(12)", "def random():\r\n return R.NextDouble()", "def rand(self):\n raise NotImplementedError", "def random(self, n=1):\n # self.num_generated += n", "def generate_sequence(n):\n\n sequence = []\n\n # generate sequence\n while n != 1:\n sequence.append(n)\n n = next_integer(n)\n\n # append 1 to sequence since all sequences assumed to end in 1\n sequence.append(1)\n\n return sequence", "def random_seq(length, nucleic_acid='DNA'):\n \n if nucleic_acid == 'DNA':\n alphabet = ('A','C','T','G')\n elif nucleic_acid == 'RNA':\n alphabet = ('A','C','U','G')\n\n so_far = ''\n for i in range(length):\n so_far += random.sample(alphabet, 1)[0]\n return so_far", "def setseq():\n\n if seqRnd[0]:\n r = getrndseq(seqRndWeight)\n if seqRnd[0] == 1:\n gbl.seqCount = r\n r = -1\n else:\n r = -1\n\n return ( r, seqRnd[1:] )", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def gen_seq(self,ntrials=20,pm_trial_position=None):\n # insert ranomly positioned pm trials\n if 
type(pm_trial_position)==type(None):\n ntrials -= 1+self.num_pm_trials\n pm_trial_position = np.random.randint(self.min_start_trials,ntrials,self.num_pm_trials) \n else:\n ntrials -= 1+len(pm_trial_position)\n pm_trial_position = pm_trial_position\n # generate og stim\n seq = np.random.randint(0,self.ntokens_og,ntrials)\n X = np.insert(seq,[0,*pm_trial_position],self.pm_token)\n # form Y \n Xroll = np.roll(X,self.nback)\n Y = (X == Xroll).astype(int) # nback trials\n Y[X==self.pm_token]=2 # pm trials\n return X,Y", "def generate_sequence(seq_len, query_distribution):\n\n np.random.seed()\n\n #normailze the frequencies to form a distribution\n query_ids, distribution = zip(*query_distribution)\n distribution /= sum(np.array(distribution))\n\n return np.random.choice(query_ids, size=seq_len,\n replace=True, p=distribution)", "def rseq(start=0.0, stop=1.0, N=10, randomness=0.5):\n\n return (randomness * sort(start + (stop - start) * rand(N))\n + (1 - randomness) * frange(start, stop, npts=N))", "def random_values():\n while True:\n yield random()", "def generate_seq(self):\n\n # Variable initialization\n eos = False\n c_s = 99\n x = []\n y = []\n\n while not eos:\n\n # Start of sequence\n if c_s == 99:\n # Sample from initial\n c_s = self.sample_p(self.proba[\"initial\"])\n\n # Consecutive iterations\n\n # We generate until we get length of self length\n elif len(x) < self.length:\n # Sample from transition of last state\n c_s = self.sample_p(self.proba[\"transition\"][c_s])\n\n # Generate emission\n\n # Note that we append the states as labels and observations as input\n y.append(c_s)\n x.append(self.sample_p(self.proba[\"emission\"][c_s]))\n\n else:\n eos = True\n\n # We get the state ID by offseting their idx by the length of observations\n ofs = len(self.obs)\n y = [i + ofs for i in y]\n return (x, y)", "def generate_numbers():\n\n return random.sample(range(100), 10)", "def generate(self) -> List[str]:\n\n self._reset()\n\n res = self._get_interactions(\n random.randint(self._min_seq_len, self._max_seq_len))\n\n self._add_guarded_first_named_alloc(res)\n\n if random.randint(0, 1):\n # Add some noise between source and destination\n # Is this helpful? 
Why?\n noise = self._get_interactions(\n random.randint(self._min_intervening_len,\n self._max_intervening_len))\n res.extend(noise)\n\n res.append(self._get_second_named_alloc())\n\n return stringify_sequence(res)", "def random(n: int) -> bytes:\n return os.urandom(n)", "def rand(self):\n return self.State.rand()", "def genNum(num, len):\n seed = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n for i in range(num):\n print ''.join(random.sample(seed, len))", "def random(self):\r\n return random.randint(1, 4)", "def totem_random():\n random_head()\n random_head()\n random_head()", "def code_generator():\n digits = [str(num) for num in range(10)]\n random.shuffle(digits)\n return digits[:3]", "def random():\n np.random.seed(1939)", "def rand_elem(seq, n=None):\n return map(random.choice, repeat(seq, n) if n is not None else repeat(seq))", "def randomSub(seed: float):\n crc = str(string.ascii_letters + string.digits)\n random.seed(seed)\n n = random.randint(10,30)\n return \"\".join(random.sample(crc, n))", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def getRandomSequence(seqLength):\n nucleotides = (\"A\", \"C\", \"G\", \"T\")\n seq = \"\"\n for i in range(seqLength):\n seq += random.choice(nucleotides)\n \n dictionary = {\"description\": \"Random sequence | \" + str(seqLength) + \"bp\", \"type\": \"dna\", \"data\" : seq}\n \n return dictionary", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def generate_code(self):\n seeds = \"1234567890\"\n random_str = []\n for i in range(4):\n random_str.append(choice(seeds))\n\n return \"\".join(random_str)", "def generate_seed():\n global seed\n seed = []\n\n for char_id in range(0, len(printable)):\n while True:\n char_sequence = [printable[randint(0, len(printable)-1)], printable[randint(0, len(printable)-1)]]\n if char_sequence not in seed:\n break\n seed.append(char_sequence)", "def generate_random_rot():\n from pyso3.quaternion import quat2rot\n import numpy as np\n q = np.random.randn(4)\n q = q / np.linalg.norm(q)\n return quat2rot(q)", "def generate_one_sample(dimension, sequence_length, repeat_times):\n # produce random sequence\n sequence = np.random.binomial(\n 1, 0.5, (sequence_length, dimension - 1)).astype(np.uint8)\n\n # allocate space for input sequence and output sequence\n input_sequence = np.zeros(\n (sequence_length + 1 + sequence_length * repeat_times, # + 1\n dimension),\n dtype=np.bool)\n output_sequence = np.zeros(\n (sequence_length + 1 + sequence_length * repeat_times, # + 1\n dimension),\n dtype=np.bool)\n\n # set value of input sequence\n input_sequence[:sequence_length, :-1] = sequence\n # input_sequence[sequence_length, -1] = repeat_times\n input_sequence[sequence_length, -1] = 1\n\n # set value of output sequence ## sequence_length + 1\n output_sequence[sequence_length+1:, :-1] = \\\n np.tile(sequence, (repeat_times, 1))\n # \"1\": A special flag which indicate the begin of the output\n # output_sequence[sequence_length, -1] = 1\n\n # return the sample\n return input_sequence, output_sequence", "def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]", "def get_random(self,num):\n return ''.join(sample('abcdefghijklmnopqrstuvwxyz1234567890!',8))", "def random_number_generator(arg1, arg2):\n return 42", "def random(cls):\n return cls(os.urandom(32))", "def random_number(length=6):\n return randint(10**(length-1), 
(10**(length)-1))", "def random():\n return constant(1)", "def generate_random_numbers(self):\r\n #random.seed(seed=self.seed)\r\n #err = random.random((3,1))\r\n #f = open('test_res', 'a')\r\n #f.write('probability - %s' %self.seed)\r\n #f.write(str(list(err[:3,:])))\r\n #f.write('\\n')\r\n #f.close()\r\n\r\n dist = RandomDistribution(self.seed)\r\n rand_numbers = dist.return_random_variables(self.num_agents)\r\n return rand_numbers", "def i_rand_a():\n return i_random() % 95 + 32", "def generate_rng(nrngs, startseed=None):\n start_rng = np.random.RandomState(startseed)\n for i in range(nrngs):\n yield np.random.RandomState(start_rng.randint(2**32))", "def rand_gen(below, baseline):\n\treturn secrets.randbelow(below)/ baseline", "def randomNumberGenerator(self):\n #infinite loop of magical random numbers\n print(\"Making random numbers\")\n while not thread_stop_event.isSet():\n number = random.randint(10000,99999)\n print(number)\n socketio.emit('newQrCode', str(number), namespace='/test')\n time.sleep(5)", "def random():\n np.random.seed(0)", "def _generate_seq(sn):\n a, b = 0, 1\n for i in range(sn):\n yield str(a) + ' '\n a, b = b, a+b", "def get_random_sequence(genome):\n \n chr_list = get_chromosome_length(genome)\n \n random_seq = {}\n chr = random.sample(chr_list.keys(),1) #select chromosome\n slen = random.randint(300,1000) #select sequence length\n if chr_list[chr[0]] - slen > 0:\n spos = random.randint(1,chr_list[chr[0]] - slen) #select start position\n \n seq = get_fragment(genome, chr[0], slen, spos)\n if seq.count(\"N\") > 0.1 * slen:\n seq = get_random_sequence(genome)\n else:\n seq = get_random_sequence(genome)\n \n return seq", "def generator(self, args, gen):\n import random\n\n if args.seed:\n random.seed(args.seed)\n seqs = [s for s in gen]\n sample_indices = random.sample(range(len(seqs)), min(len(seqs), args.number))\n for i in sample_indices:\n yield seqs[i]", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def genRandom(self, bits):\n _rand = 0\n _bytes = bits // 8 + 8\n\n while(len(bin(_rand))-2 < bits):\n\n try:\n _rand = int.from_bytes(random_function(_bytes), byteorder='big')\n except:\n _rand = int(random_function(_bytes).encode('hex'), 16)\n\n return _rand", "def rand(self): # Method doctring\n\n self._last_rand = xorshift32(self._last_rand, self.triple)\n return self._last_rand", "def generateRandomString():\n return ''.join(b64encode(urandom(32)).decode('utf-8'))", "def makeChrom(length):\n output = []\n for i in range(length):\n output.append(randrange(14))\n return output", "def getRandom(self) -> int:\n count = len(self.arr)\n return self.arr[randint(0, count-1)]", "def random_body():\n extra = random.randint(1, 9)\n sequence = (hex(rand32()) for _ in range(extra))\n return hex(id_) + '\\n' + '\\n'.join(sequence)", "def generator(self, random, args):\r\n raise NotImplementedError", "def random_keys(self):\n while True:\n yield self.generator.str()", "def i_random():\n global randrsl, randcnt\n\n r = randrsl[randcnt]\n randcnt += 1\n if (randcnt > 255):\n isaac_()\n randcnt = 0\n\n return r", "def gensalt():\n return hexlify(os.urandom(24)).decode()", "def genKey(length=32):\r\n return os.urandom(length)", "def sample(self):\n seq = []\n for i in range(self._pwm.shape[1]):\n p = numpy.array(self._pwm[:, i], dtype=numpy.float64)\n p /= p.sum()\n seq.extend(numpy.random.choice(self.alphabet, p=p))\n return \"\".join(seq)", "def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def genRandString(dl = 10):\n ret 
= ''\n for i in range(dl) :\n ret += random.choice(string.ascii_letters + string.digits)\n return ret", "def gen_rand(l):\n w = int(l / 2)\n\n min = (1 << (w - 1)) | 1\n max = (1 << w) - 1\n\n n = random.randrange(min, max) | 1\n\n return n", "def getRandom(self) -> int:\n steps = random.randint(0, self.len-1) # 随机抽取一个\n temp = self.head\n for i in range(steps):\n temp=temp.next\n return temp.val", "def random_num(self):\r\n self.generate_n1()\r\n self.generate_n2()\r\n self.generate_n3()\r\n self.generate_n4()\r\n random_number = str(self.n_1decimal)+str(self.n_2decimal)+str(self.n_3decimal)+str(self.n_4decimal)\r\n print int(random_number)", "def getRandomAngle():\r\n\treturn random.random() * math.pi * 2", "def seed_random(max_integer):\n return random.randrange(0,max_integer);", "def generator(self, random, args):\n\t\traise NotImplementedError", "def rand_ident():\n return random.randrange(MAX_IDENT)", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def _random_issn():\n first = randint(1000, 9999)\n second = randint(100, 999)\n return str(first) + \"-\" + str(second) + str(_select_from([1, 2, 3, 4, 5, 6, 7, 8, 9, \"X\"]))", "def generate(self):\n node = self.generate_random()\n\n while True:\n yield node.state[-1]\n if len(node.next_states) != 0:\n node = node.get_next_state()\n if node == None:\n node = self.generate_random()\n while len(node.next_states) == 0:\n node = self.generate_random()\n else:\n node = self.generate_random()\n while len(node.next_states) == 0:\n node = self.generate_random()", "def generateSequenceBias(self, bias):\n\n if bias < 0 or bias > 1:\n raise ValueError(\"Bias must be a value between 0 and 1.\")\n else:\n for i in range(self.length):\n self.sequence.append(0 if random.random() < bias else 1)\n self.biasSeq = 1\n self.bias = bias", "def random_generator(nurses_number: int = 10):\n\n # For each possible shift of all the nurses, is generated randomly a value to define as allocated or not\n state = ''\n\n # The range goes from 0 to 21*nurses_number. 
This happens because we every time have 21 shifts to n nurses\n for i in range(0, 21 * nurses_number):\n state = state + str(randrange(0, 2))\n\n # Return the new state generated\n return state", "def _generate_string_seq():\n input_word_num = random.randint(1, config.MAX_INPUT_WORD_NUMBER)\n return ' '.join(resources.get_random_words(input_word_num))", "def __generate_random_gene_sequence(self):\n genes = []\n for j in range(self.chromosome_size):\n genes.append(random.choice(self.gene_pool))\n\n return genes", "def my_random(a):\r\n import random\r\n r = random.randint(0, 100)\r\n return a + r", "def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)", "def gen_random(\n l: list,\n n: int,\n seed: int = None\n ) -> str:\n\n # Initialisations\n s = \"\"\n\n # Loop for the desired length of the string\n for i in range(0, n):\n\n if seed is not None:\n\n numpy.random.seed(seed + i)\n\n # Append the next random character\n s += numpy.random.choice(l)\n\n return s", "def generate_custom_sequence(program, pass_space=DEFAULT_GENE_POOL,\n debug=False):\n global print_out\n print_out = debug\n return simulate_generations(pass_space, program)", "def randomSeq(n, a, b):\n \n return [\n Complex(a + np.random.random()*(b-a), a + np.random.random()*(b-a))\n for _ in range(n)\n ]", "def random_sequence(amount=100, start=0, stop=0, reverse=False):\n \n sequence = []\n if start == stop:\n for i in range(amount):\n sequence.append(stop)\n elif start < stop and not reverse: \n for i in range(amount):\n number = random.randrange(start, stop)\n sequence.append(number)\n sequence.sort()\n elif start < stop and reverse: \n for i in range(amount):\n number = random.randrange(start, stop)\n sequence.append(number)\n sequence.sort(reverse=True)\n\n elif start > stop and not reverse: \n for i in range(amount):\n number = random.randrange(stop, start)\n sequence.append(number)\n sequence.sort()\n elif start > stop and reverse: \n for i in range(amount):\n number = random.randrange(stop, start)\n sequence.append(number)\n sequence.sort(reverse=True)\n return sequence", "def getRandom(self) -> int:\n return random.choice(tuple(self.l))", "def computer_generate(self):\n return choice[random.randrange(3)]", "def rand(lo=0, hi=1):\n global Seed\n Seed = (16807 * Seed) % 2147483647\n return lo + (hi - lo) * Seed / 2147483647", "def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in range(0,size,1):\n gene+=random.choice(self.instructions)\n return gene", "def shotgenerator():\n return random.randint(0, 9), random.randint(0, 9)" ]
[ "0.7697955", "0.7600561", "0.7551118", "0.75248724", "0.7125383", "0.7092031", "0.69861037", "0.69277364", "0.6886229", "0.6868949", "0.6833825", "0.6803206", "0.67367864", "0.67331696", "0.6730156", "0.6715812", "0.6699407", "0.66590357", "0.66281706", "0.66149676", "0.65995187", "0.65995187", "0.6595084", "0.65926003", "0.6589287", "0.6526456", "0.6507666", "0.6493813", "0.64916354", "0.64704496", "0.64284474", "0.6412195", "0.6389472", "0.6383682", "0.6376732", "0.63756526", "0.63748145", "0.6361774", "0.6359661", "0.6349557", "0.6347794", "0.6343306", "0.63256556", "0.6319575", "0.6316664", "0.6297366", "0.62889344", "0.6288031", "0.62869245", "0.6272077", "0.6254106", "0.62495464", "0.62472504", "0.62420136", "0.6236023", "0.62344503", "0.62164193", "0.6213061", "0.62045175", "0.61961657", "0.6188359", "0.6182299", "0.6170367", "0.61689585", "0.61681896", "0.6154074", "0.6146085", "0.6143121", "0.6138793", "0.6134056", "0.6122844", "0.61178416", "0.6107172", "0.61066616", "0.61066616", "0.6104341", "0.61021745", "0.60951346", "0.6094966", "0.609262", "0.6077214", "0.60750395", "0.6070348", "0.60690355", "0.6063439", "0.6060368", "0.60598695", "0.6059355", "0.6054469", "0.60532415", "0.60517377", "0.6040376", "0.6034677", "0.60313475", "0.60302454", "0.60285693", "0.60275376", "0.60235995", "0.60178214", "0.6015", "0.6013653" ]
0.0
-1
Settings for a random step sequence
def __init__(self, step_time, saw_time, delta_t, mu=None, sigma=None, n_step=None, ss=None): self.ss = ss self.n_step = n_step self.mu = mu self.sigma = sigma self.step_time = step_time self.saw_time = saw_time / delta_t
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def take_step(self):\n choices_of_steps = [(0,1), (1,0), (0,-1), (-1,0)]\n return random.choices(choices_of_steps)[0]", "def _setup_next_sequence(cls):\n return 0", "def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5", "def setseq():\n\n if seqRnd[0]:\n r = getrndseq(seqRndWeight)\n if seqRnd[0] == 1:\n gbl.seqCount = r\n r = -1\n else:\n r = -1\n\n return ( r, seqRnd[1:] )", "def _setVals(self, step=0):\n self.step = step", "def evaluate_config(rnd: int):\n val_steps = 5 if rnd < 4 else 10\n return {\"val_steps\": val_steps}", "def init_random_state(self):\n self.current_state = self.rng.uniform(size=[1, self.num_spins])\n self.current_state = np.where(self.current_state < 0.5, -1.0, 1.0)", "def experiment4():\n np.random.seed()\n state['result'] = np.random.rand(1)", "def test_case_generate(self):\n\n # initialization\n state = np.random.choice(self.init_states)\n model = rm.randint(0, self.model_num - 1)\n duration = np.random.choice(self.step_values)\n temp = rm.randint(self.min_temp, self.max_temp)\n\n self.states = [[model, duration, temp]]\n self.time = duration\n\n while self.time < self.max_time:\n if state == \"inc_tmp\":\n change = np.random.choice(\n self.transitionName[0], p=self.transitionMatrix[0]\n ) # choose the next state\n if change == \"S1S1\": # stay in the same state\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n diff = (\n self.max_time - self.time\n ) # this is for ensuring the maximum duration is not exceeded\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n elif change == \"S1S2\": # change from increase to decrease\n temp = self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"dec_tmp\"\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n else:\n print(\"Error\")\n\n elif state == \"dec_tmp\":\n change = np.random.choice(\n self.transitionName[1], p=self.transitionMatrix[1]\n )\n if change == \"S2S1\":\n temp = self.get_temp_inc(temp)\n model = rm.randint(0, self.model_num - 1)\n state = \"inc_tmp\"\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n\n self.time += duration\n self.states.append([model, duration, 
temp])\n\n elif change == \"S2S2\":\n temp = self.get_temp_dec(temp)\n model = rm.randint(0, self.model_num - 1)\n\n diff = self.max_time - self.time\n if (diff) < self.max_step and (diff) > self.min_step:\n duration = diff\n self.states.append([model, duration, temp])\n return self.states_to_dict()\n elif diff < self.min_step:\n self.states[len(self.states) - 1][1] += diff\n return self.states_to_dict()\n else:\n duration = np.random.choice(self.step_values)\n self.time += duration\n self.states.append([model, duration, temp])\n\n else:\n print(\"Error\")\n pass\n else:\n print(\"Error\")\n\n return self.states_to_dict()", "def next_state(self):\n \n self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])", "def set_rand_seed(self, idx):\n random.seed(self.base_seed + self.epoch + idx // 2)", "def step(self):\n\n self.agents[random.randint(self.get_agent_count())].step()\n self.steps += 1\n self.time += 1", "def setUp(self) -> None:\n self.random = np.random.RandomState(seed=42)", "def sequence_params(self):", "def initialize_random_number_generator(self,question_type):\n\t\tself.generator.seed(self.generate_index(self.magic, self.level, self.problem_id, question_type))", "def setRandom(self):\n pass # define each VarElement family", "def swait_setup_random_number(swait, **kw):\n swait.reset()\n swait.scan.put(\"Passive\")\n swait.calc.put(\"RNDM\")\n swait.scan.put(\".1 second\")\n swait.desc.put(\"uniform random numbers\")", "def nextPhase(self):\n\n if self.sensorType == SENSOR_TYPES[\"TEMP\"]:\n self.value = self.randGen.choice(TEMP_RANGE)\n else:\n self.value = self.randGen.randint(0, 100)", "def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)", "def seed():", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def set_first_machine_time_step(self, first_machine_time_step):", "def step_particles(particle,self):\n\n self.models[particle].step()\n\n self.states[particle] = (self.models[particle].agents2state()\n\n + np.random.normal(0, self.particle_std**2, \n\n size=self.states[particle].shape))\n\n self.models[particle].state2agents(self.states[particle])\n\n return self.models[particle], self.states[particle]", "def set_IStep(self, istep = [0], istep_sigma = [0], tstep = 5, tdur = 1e6, give_freq = True):\n if self.do_run:\n #for m in self.ic_steps:\n # m.destroy()\n # del m \n #del self.ic_steps\n \n #self.ic_steps = []\n \n istep = list(istep)\n neg = False\n \n for n in range(self.n_celltypes):\n \n if istep[n] < 0: \n neg = True\n istep[n] = abs(istep[n]) # make positive again\n \n if istep[n] != 0:\n if give_freq is True:\n a = np.array([istep[n]])\n iin = self.get_i(a, n)[0]\n if self.id == 0: print \"celltype: \", n, \" istep: \", istep[n], \"Hz => \", iin, \" nA\"\n istep[n] = iin \n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n np.random.seed(gid*30)\n \n if self.i_holdrs == []:\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n], istep[n]*istep_sigma[n], 1).clip(min=0)\n else: # same ihold for all cells!\n istep_r = istep[n]\n \n else: # ihold has been set!\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n]-self.i_holds[n], (istep[n]-self.i_holds[n])*istep_sigma[n], 1).clip(min=0) # delta now! 
put on top of hold!\n else: # same ihold for all cells!\n istep_r = istep[n]-self.i_holds[n] # delta now! put on top of hold!\n \n if neg:\n istep_r = -1*istep_r\n \n if istep[n] == 0:\n istep_r = -1*self.i_holdrs[n][i] \n \n #print 'is:' + str(istep_r) + 'was:' + str(self.i_holdrs[n][i])\n \n if istep_r != 0: \n # step current\n ic_step = h.IClamp(self.cells[n][i].soma(0.5))\n ic_step.delay = tstep/ms\n ic_step.dur = tdur/ms\n ic_step.amp = istep_r/nA\n self.ic_steps.append(ic_step)\n \n \n if self.id == 0: print \"set_IStep finished. istep: \", istep, \", istep_sigma: \", istep_sigma", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def setUp(self):\n # record the randomness used in case the test fails:\n self.rand_seed = int(time.time())\n sr.seed(self.rand_seed)\n print(\"seed for this test: \" + str(self.rand_seed))", "def totem_random():\n random_head()\n random_head()\n random_head()", "def simulate(self):\n self.round += 1", "def set_seed(self, seed: int):\n self.rsimulator.set_seed(seed)\n # Maybe call new game here?", "def NewRndSeed(ss):\n ss.RndSeed = int(datetime.now(timezone.utc).timestamp())", "def setUp(self):\n # record the randomness used in case the test fails:\n rand_seed = int(time.time())\n sr.seed(rand_seed)\n print(\"seed for this test: \" + str(rand_seed))", "def __random_movement(self):\n\t\tself.__steps += 1 \t\t# Increment after every frame\n\t\t# When __steps greater than threshold reverse the direction\n\t\t# and set threshold to a new random value\n\t\tif self.__steps >= self.__threshold_steps:\t\n\t\t\tif self.direction == 'RIGHT':\n\t\t\t\tself.move_left()\n\t\t\t\tself.direction = 'LEFT'\n\t\t\telse:\n\t\t\t\tself.move_right()\n\t\t\t\tself.direction = 'RIGHT'\n\t\t\tself.__threshold_steps = random.randint(25,50)\n\t\t\tself.__steps = 0\n\t\t# Confines the Donkeys movement to within the boundary \n\t\tself.__check_boundary()", "def rand(self):\n raise NotImplementedError", "def step(self, state):", "def random(self, n=1):\n # self.num_generated += n", "def random_params_gen(self) -> TransformParams:\n while True:\n do_hor_flip = self.horizontal_flip and (np.random.random() < 0.5)\n do_vert_flip = self.vertical_flip and (np.random.random() < 0.5)\n\n yield TransformParams(do_hor_flip=do_hor_flip,\n do_vert_flip=do_vert_flip)", "def SetRandomSeed(seed):\n global option\n option['random_seed'] = seed", "def __init__(self, random_state):\n self.random_state = random_state\n self.random_generator = RandomState(self.random_state)", "def _sample_seed(self):\n raise Exception(\" not implemented in base model\")", "def start_new_chain(self, random_seed=None):\n if random_seed is not None:\n np.random.seed(random_seed)\n\n if self.n_burning > 0:\n parameters = self.sample_parameters(float(self.n_burning) / (self.thinning + 1))\n else:\n parameters = [self.samples_parameters[-1]]\n\n self.samples_parameters = []\n self.samples_parameters.append(parameters[-1])\n self.start_point_sampler = parameters[-1]", "def setup_method(cls):\n seed()", "def corun(self, step:int=0):\n if step > 0:\n _range = range(self.current_step, self.current_step + step + 1)\n else: # run forever\n _range = itertools.count(self.current_step)\n for step_num in _range:\n self.config_template = (yield self.step()) or self.config_template", "def randomize(self):\n \n spins = [np.random.random() > 0.5 for x in range(self.size)]\n self.spins_initial = bitarray.bitarray(spins)", "def step(self, steps):\n 
self._simulate(endStep=self.currentStep+steps)", "def _sample_steps(self):\n mixture_size = self.parameters['fixed_mixture_size']\n if not self.is_correlated_mixture and mixture_size is None:\n return self.get_steps('monte_carlo')\n else:\n return self.get_steps('metropolis')", "def _random_warmup(self, num_steps):\n new_frame = self.env.reset()\n reward = 0.0\n action = 0\n done = False\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n for i in range(num_steps):\n \n action = np.random.randint(self.num_actions)\n new_frame, reward, done, _ = self.env.step(action)\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n if done:\n new_frame = self.env.reset()\n self.memory.add_experience(0, 0.0, new_frame, 1, False)\n\n self.memory.add_experience(0, 0.0, new_frame, 1, True)", "def rand(self):\n self.state = (self.a * self.state + self.c)\n return self.state", "def randomize_position(self, w, steps = 3):\n \n #self.red.set_power(0)\n \n for k in range(steps):\n for idx,waveplate in enumerate(w):\n print '* Randomizing %s waveplate (step %d) ...'%(waveplate, k)\n self.rotator.quick_scan(np.random.uniform(low = -20000, high = 20000) ,getattr(self,'_'+waveplate+'_channel'))", "def update_random_state(self):\n self.random_state = RandomState()", "def setSeqRnd(ln):\n\n global seqRnd\n\n emsg = \"use [ON, OFF or TrackList ]\"\n if not ln:\n error(\"SeqRnd:\" + emsg)\n\n a=ln[0].upper()\n\n if a in (\"ON\", \"1\") and len(ln) == 1:\n seqRnd = [1]\n\n elif a in (\"OFF\", \"0\") and len(ln) == 1:\n seqRnd = [0]\n\n else:\n seqRnd=[2]\n for a in ln:\n a = a.upper()\n if not a in gbl.tnames:\n error(\"SeqRnd: Track '%s' does not exist, %s\" % (a, emsg))\n if a in seqRnd:\n error(\"SeqRnd: Duplicate track '%s' specified, %s\" % (a, emsg))\n seqRnd.append(a)\n\n if gbl.debug:\n print \"SeqRnd:\",\n if seqRnd[0] == 2:\n for a in seqRnd[1:]:\n print a,\n print\n elif seqRnd[0] == 1:\n print \"On\"\n else:\n print \"Off\"", "def set_seed(self,seed):\r\n if seed is None:\r\n warnings.warn(\r\n \"Initializing player with seed from Axelrod module random number generator. 
\"\r\n \"Results may not be seed reproducible.\")\r\n self._seed = _module_random.random_seed_int()\r\n else:\r\n self._seed = seed\r\n self._random = RandomGenerator(seed=self._seed)\r\n self.base._random = self._random\r\n self.trust._random = self._random\r\n self.conviction._random = self._random\r\n \r\n self.generator = torch.Generator()\r\n self.generator.manual_seed(int(seed))", "def step(self, step=None):\n pass", "def start_random_sequence(self) -> int:\n return random.randint(0, TWO_BYTES)", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def configure_step(self):\n pass", "def setRandDirection(self):\n phi = 2*math.pi*random.random()\n u = 2*random.random() - 1\n v = math.sqrt(1-u*u)*math.cos(phi)\n w = math.sqrt(1-u*u)*math.sin(phi)\n self.direction = (u,v,w)", "def set_states(self, states):\n if states is None:\n logging.getLogger('eval').warning(\n 'could not reproduce state, setting unreproducable random seed for all random states')\n self.randomstate.seed(np.random.randint(0, 1000000))\n if hasattr(self, 'random_mask_state'):\n self.random_mask_state.seed(np.random.randint(0, 100000))\n if hasattr(self, 'deformrandomstate'):\n self.deformrandomstate.seed(np.random.randint(0, 100000))\n else:\n if hasattr(self, 'random_mask_state') and 'random_mask_state' in states:\n self.random_mask_state.set_state(states['random_mask_state'])\n if hasattr(self, 'deformrandomstate') and 'deformrandomstate' in states:\n self.deformrandomstate.set_state(states['deformrandomstate'])\n self.randomstate.set_state(states['randomstate'])", "def configure_step(self):\n\n pass", "def setSeqRndWeight(ln):\n\n global seqRndWeight\n\n seqRndWeight = getweights(ln, \"SeqRndWeight\")", "def setRandomSensitivitySpeed(self) -> None:\n\n self.sensitivity = randint(20, 70)\n self.speed = randint(7, 12)", "def __dice_generator(self):\n self.current_dice = np.random.randint(1, 6 + 1)", "def seed(seed: int):\n # all sampling is actually happening in the move_cube module\n move_cube.seed(seed)", "def SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def _random_start_position(self):\r\n self.position = np.array(random.choice(self.start_positions),\r\n dtype=np.int16)", "def _rgbSequenceInit(self):\n ## send all of this to sequence acq\n if not self.nbFrames:\n self.nbFrames = int(self.duration/self.cycleTime)+1 ## Determine number of frames. 
(+1) because int round at the lower int\n self.ledSeq = [0]*self.rgbLedRatio[0]+[1]*self.rgbLedRatio[1]+[2]*self.rgbLedRatio[2] #Sequence of LED lighting in function of the ratio\n #RED = 0\n #GREEN = 1\n #BLUE = 2\n print('LED sequence : ', self.ledSeq)\n self.ledList = self.ledSeq*(int(self.nbFrames/(len(self.ledSeq)))+1) ## schedule LED lighting\n #NB : no return needed because each ledList and nbFrames are instance attribute", "def reinitialize(self, random_state):\n pass", "def _set_seed(self) -> None:\r\n random.seed(self.seed)\r\n np.random.seed(self.seed)", "def trial_config(self, prev_config, cov_config=1e-2):\r\n return prev_config + np.random.normal(0, cov_config, len(prev_config))", "def randomize_value(self) -> None:", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def gen_random_walk(self,n_step=100):\n # Warning about the small number of steps\n if n_step < 30:\n print(\"WARNING! The number of steps is small. It may not generate a good stochastic process sequence!\")\n \n w = np.ones(n_step)*self.x0\n \n for i in range(1,n_step):\n # Sampling from the Normal distribution with probability 1/2\n yi = np.random.choice([1,-1])\n # Weiner process\n w[i] = w[i-1]+(yi/np.sqrt(n_step))\n \n return w", "def set_seed(self, seed=None):\n super().set_seed(seed=seed)\n for t in self.policy_list:\n t.set_seed(self._random.random_seed_int())", "def _step(self):\n title()\n self.runCount = 1\n self.experiment.pause = False\n self._runExperiment()\n self.pause = True", "def reset(self):\n \n self.steps = 0\n if self.episode == 0:\n self.ins = random.uniform(self.mins.values[:4],self.maxes.values[:4])\n #get the corresponding outputs:\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n self.starts = np.append(self.ins, outs)\n\n else:\n self.starts = self.state[:7] #previous episode's end state\n\n #get goals from random inputs:\n viable = False\n while viable == False:\n self.ins = random.uniform((self.mins.values[:4]+(self.mins.values[:4]*self.minmaxbuffer)),self.maxes.values[:4]-(self.maxes.values[:4]*self.minmaxbuffer))\n out_flow = self.temp_func(var=self.O_CH4_flow_uniformity)\n out_frac = self.temp_func(var=self.O_CH4_mol_frac)\n out_temp = self.temp_func(var=self.O_t)\n\n outs = np.array([out_flow,out_frac,out_temp])\n \n # Check if viable:\n viable = self.test_viable(outs)\n\n self.goals = outs\n\n # These are your current inputs:\n self.ins = self.starts[:4]\n # State carries the starting points and the goals.\n self.state = np.append(self.starts,self.goals)\n\n #Track episodes and total reward.\n self.episode += 1\n self.tot_rew = 0\n\n return (self.state)", "def step(self, **kwargs):\n pass", "def reset(self, setup=False):\n self._done = False\n self._nbSteps = 0\n\n x = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n x = random.randint(0, self._width - 1)\n elif (self.startPosX == 'random' and not setup):\n x = self._initState[0]\n elif self.startPosX == 'center':\n x = self._width - 1\n else:\n x = int(self.startPosX)\n\n y = None\n if (self.startPosX == 'random' and setup) or (\n self.startPosX == 'episodeRandom'):\n y = random.randint(0, self._height - 1)\n elif (self.startPosY == 'random' and not setup):\n y = self._initState[1]\n elif self.startPosX == 'center':\n y = self._height - 1\n else:\n y = int(self.startPosX)\n\n self._currentPos = (x, y)\n 
self._trajectory = [(x, y)]\n\n return (x, y)", "def test_random_movement(\n size: Union[int, tuple], num_berries: int, delay_seconds: int, number_steps: int\n) -> None:\n game = Game(\n size,\n [0, 0],\n -1,\n 5,\n -5,\n 10,\n num_berries,\n berry_movement_probabilities=[0.5] * num_berries,\n )\n print(f\"Starting board:\\n{game.get_board()}\")\n done = False\n i = 1\n while not done and i < number_steps:\n print(f\"Action {i}\")\n time.sleep(delay_seconds)\n _, reward, done = game.step(random.choice(MOVEMENTS))\n print(f\"Board:\\n{game.get_board()}\")\n print(f\"Reward: {reward}\")\n i += 1", "def setup_steps(self):\n step1 = ground_step.Ground(5745, 495, 40, 44)\n step2 = ground_step.Ground(5788, 452, 40, 44)\n step3 = ground_step.Ground(5831, 409, 40, 44)\n step4 = ground_step.Ground(5874, 366, 40, 176)\n\n step5 = ground_step.Ground(6001, 366, 40, 176)\n step6 = ground_step.Ground(6044, 408, 40, 40)\n step7 = ground_step.Ground(6087, 452, 40, 40)\n step8 = ground_step.Ground(6130, 495, 40, 40)\n\n step9 = ground_step.Ground(6345, 495, 40, 40)\n step10 = ground_step.Ground(6388, 452, 40, 40)\n step11 = ground_step.Ground(6431, 409, 40, 40)\n step12 = ground_step.Ground(6474, 366, 40, 40)\n step13 = ground_step.Ground(6517, 366, 40, 176)\n\n step14 = ground_step.Ground(6644, 366, 40, 176)\n step15 = ground_step.Ground(6687, 408, 40, 40)\n step16 = ground_step.Ground(6728, 452, 40, 40)\n step17 = ground_step.Ground(6771, 495, 40, 40)\n\n step18 = ground_step.Ground(7760, 495, 40, 40)\n step19 = ground_step.Ground(7803, 452, 40, 40)\n step20 = ground_step.Ground(7845, 409, 40, 40)\n step21 = ground_step.Ground(7888, 366, 40, 40)\n step22 = ground_step.Ground(7931, 323, 40, 40)\n step23 = ground_step.Ground(7974, 280, 40, 40)\n step24 = ground_step.Ground(8017, 237, 40, 40)\n step25 = ground_step.Ground(8060, 194, 40, 40)\n step26 = ground_step.Ground(8103, 194, 40, 360)\n\n step27 = ground_step.Ground(8488, 495, 40, 40)\n\n self.step_group = pygame.sprite.Group(step1, step2,\n step3, step4,\n step5, step6,\n step7, step8,\n step9, step10,\n step11, step12,\n step13, step14,\n step15, step16,\n step17, step18,\n step19, step20,\n step21, step22,\n step23, step24,\n step25, step26,\n step27)", "def __init__(self, env, random_seed=None):\n self.env = env \n self.RandomState = np.random.RandomState(random_seed)", "def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"", "def Gen_RandLine(length, step_max, dims=2):\n \n lineData = np.empty((dims, length))\n lineData[:, 0] = np.random.rand(dims)\n for index in range(1, length):\n step = ((np.random.rand(dims) - 0.5)*step_max)\n lineData[:, index] = lineData[:, index - 1] + step\n return lineData", "def setUp(self):\n self.t = True\n self.f = False\n self.value = 25", "def __init__(self):\n \n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToPlay'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def setUp(self):\n shape = RNG.integers(5, 50)\n periods = self.periods = RNG.normal() * 3\n freq = periods / shape\n amp = RNG.normal()\n 
offset = RNG.normal()\n phase = (RNG.normal() - 1 / 2) * 5 / 3 * np.pi\n p_gt = self.p_gt = (amp, freq, phase, offset)\n x = self.x = np.arange(shape)\n self.data = sine(x, *p_gt)", "def set_seed(self, seed: int):\n self.__sim.seed(seed)", "def __init__(self, allow_step_back=False):\n self.allow_step_back = allow_step_back\n self.np_random = np.random.RandomState()\n \"\"\" No big/small blind\n # Some configarations of the game\n # These arguments are fixed in Leduc Hold'em Game\n # Raise amount and allowed times\n self.raise_amount = 2\n self.allowed_raise_num = 2\n self.num_players = 2\n \"\"\"\n # Some configarations of the game\n # These arguments can be specified for creating new games\n\n # Small blind and big blind\n self.small_blind = 1\n self.big_blind = 2 * self.small_blind\n\n # Raise amount and allowed times\n self.raise_amount = self.big_blind\n self.allowed_raise_num = 2\n\n self.num_players = 2", "def test_init(self):\n global_step = tf.get_variable(\"global_step\", [], tf.int32,\\\n initializer=tf.constant_initializer(0, dtype=tf.int32),\n trainable=False)\n lstm_pi = LSTMPolicy((80,80,3), 4,global_step)", "def __init__(self, rate):\n super(RandomWander, self).__init__()\n self.iteration = 0\n self.rate = rate\n self.speed = 0\n self.heading = 0", "def step(self): \n self.reset_parameters()\n\n if np.random.uniform(0, 1) < self.model.churn_prob: self.exit_triggered = True \n if self.exit_triggered:\n self.exit()\n else:\n self.register_deposit(self.deposit_intent)\n self.register_contribution(self.contribution_intent)\n self.register_sponsorship(self.sponsor_intent)\n self.register_euro_exchange(self.euro_exchange_intent)\n self.register_teo_exchange(self.teo_exchange_intent)\n self.register_withdraw(self.withdraw_intent)", "def rand(self):\n return self.State.rand()", "def test_init():\n rng = NonRandom()\n seed = 5\n rng.setSeed(seed)\n wheel = Wheel(rng)\n assert len(wheel.bins) == 38\n assert wheel.rng.value == seed\n assert wheel.rng.choice(range(0, 38)) == range(\n 0, 38)[wheel.rng.value] # == seed", "def generator(self, random, args):\r\n raise NotImplementedError", "def test_run_sim_1():\n rnd = rand.Arrivals(36, 41)\n sim.run_sim(3, 2, 5, 6, 22, rnd)", "def play_random(env, steps):\n try:\n done = True\n progress = tqdm(range(steps))\n for _ in progress:\n if done:\n _ = env.reset()\n action = env.action_space.sample()\n _, reward, done, info = env.step(action)\n progress.set_postfix(reward=reward, info=info)\n env.render()\n except KeyboardInterrupt:\n pass\n # close the environment\n env.close()", "def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)", "def __init__(self, initial_value, n_values, schedule):\n self.step = 0.\n self.initial_value = initial_value\n self.nvalues = n_values\n self.schedule = SCHEDULES[schedule]", "def get_next_sample(self):" ]
[ "0.6600735", "0.6600735", "0.6426488", "0.63580966", "0.63181347", "0.6233752", "0.6195526", "0.61665463", "0.6133349", "0.6077554", "0.60269153", "0.5976023", "0.595699", "0.59379584", "0.59343755", "0.59263146", "0.5924577", "0.59218735", "0.5883561", "0.5867385", "0.58269936", "0.5821847", "0.5802779", "0.5774282", "0.57555526", "0.57509786", "0.574638", "0.57450294", "0.5740118", "0.57344127", "0.5717241", "0.57172257", "0.56846654", "0.56758934", "0.567149", "0.56553286", "0.56395864", "0.5639068", "0.5637476", "0.56370026", "0.563278", "0.56323016", "0.5628614", "0.5627348", "0.56260455", "0.5609896", "0.5604609", "0.56016976", "0.5601342", "0.55982894", "0.5597746", "0.55915093", "0.55913436", "0.55911785", "0.5580702", "0.557787", "0.55691504", "0.55665696", "0.55651593", "0.55644536", "0.55555946", "0.5553894", "0.55499774", "0.5549558", "0.55430317", "0.55393404", "0.5526295", "0.5524297", "0.5523442", "0.5521429", "0.55153036", "0.5514629", "0.551019", "0.5507653", "0.5503758", "0.54958874", "0.54932344", "0.5486296", "0.54810774", "0.54761463", "0.5475091", "0.54738265", "0.54711276", "0.5468878", "0.5465642", "0.54418707", "0.5437692", "0.5435852", "0.54288685", "0.54225165", "0.54206306", "0.5419599", "0.54185426", "0.54017556", "0.54011166", "0.53993434", "0.53983533", "0.5395225", "0.53945327", "0.5394164", "0.5389844" ]
0.0
-1
Generate a random sequence
def out(self, t: any, dim=(None, None)) -> any: # Initialize random step vector each sampling period using comprehensive list. step_vector = np.abs([round(gauss(self.mu, self.sigma), 1) for _ in range(self.n_step)]) step_vector[0] = self.ss # keep the steady state value as first u = np.zeros(shape=dim) # Initialize step control input array u. j = 0 ramp_Step = self.saw_time count = 1 for i in range(len(t)): # Excluding the last point if t[i] % self.step_time == 0 and t[i] != 0 and j + 1 != len(step_vector) and i != len( t) - 1: # No last step j += 1 count = 1 if self.ss is not None and j == 0: u[i, :] = self.ss else: if count != ramp_Step: u[i, :] = (step_vector[j] - step_vector[j - 1]) * (count / ramp_Step) + step_vector[j - 1] count += 1 else: u[i, :] = step_vector[j] return u
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_random_sequence():\n\n seq = []\n [seq.append(np.random.choice(cs.DNA_BASES)) for _ in range(cs.LENGTH)]\n\n return seq", "def generate_random(self: object) -> None:\n self.random.set(Sequence.generate(length=50))", "def start_random_sequence(self) -> int:\n return random.randint(0, TWO_BYTES)", "def random(self, seq=None):\n if seq is None:\n seq = self.seq\n seq_list = list(seq)\n random.shuffle(seq_list)\n return \"\".join(seq_list)", "def _generate_random_number_for_each_sequence(total, sequence_number):\r\n current_total = 0\r\n r = []\r\n for n in range(sequence_number-1, 0, -1):\r\n current = random.randint(1, total - current_total - n)\r\n current_total += current\r\n r.append(current)\r\n r.append(total - sum(r))\r\n random.shuffle(r)\r\n\r\n return r", "def choice(seq):\r\n i = int(random() * len(seq))\r\n return seq[i]", "def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)", "def generate() -> int:\n return randint(0, 1000000000)", "def get_random_sequence(length):\n sequence = ''\n for i in range(length):\n random_letter = format(random.randrange(9), 'x')\n sequence = '{}{}'.format(sequence, random_letter)\n return sequence", "def generate_sequence(self, n=100, initial_state=None):\n\n if initial_state is None:\n if self.pad:\n sequence = [START_OF_SEQ] * self.order\n else:\n sequence = list(random.choice(self.records.keys()))\n else:\n sequence = initial_state[:]\n\n for i in range(n):\n current_state = tuple(sequence[-self.order:])\n next_token = self.sample(current_state)\n sequence.append(next_token)\n\n if next_token == END_OF_SEQ:\n return sequence\n\n return sequence", "def random_sample(seq):\r\n if len(seq) = 0:\r\n return None\r\n return sample(seq, randint(1, len(seq)/2))", "def rngnext():\n out = []\n # random\n state = random.getstate()\n out.append(f\"r={random.random():0.4f}\")\n random.setstate(state)\n\n # numpy\n state = np.random.get_state()\n out.append(f\"n={np.random.random():0.4f}\")\n np.random.set_state(state)\n\n # torch\n state = torch.random.get_rng_state()\n out.append(f\"t={torch.rand(1)[0]:0.4f}\")\n torch.random.set_rng_state(state)\n\n # cuda\n if torch.cuda.is_available():\n state = torch.cuda.get_rng_state()\n # note there is no function for generating a random in cuda but this may work?\n out.append(f\"c={state.float().std()%1:0.4f} {torch.backends.cudnn.deterministic}\")\n\n return out", "def rand(self):\n raise NotImplementedError(\"Not implemented yet.\")", "def generate_raiz():\n\treturn os.urandom(12)", "def random():\r\n return R.NextDouble()", "def rand(self):\n raise NotImplementedError", "def random(self, n=1):\n # self.num_generated += n", "def generate_sequence(n):\n\n sequence = []\n\n # generate sequence\n while n != 1:\n sequence.append(n)\n n = next_integer(n)\n\n # append 1 to sequence since all sequences assumed to end in 1\n sequence.append(1)\n\n return sequence", "def random_seq(length, nucleic_acid='DNA'):\n \n if nucleic_acid == 'DNA':\n alphabet = ('A','C','T','G')\n elif nucleic_acid == 'RNA':\n alphabet = ('A','C','U','G')\n\n so_far = ''\n for i in range(length):\n so_far += random.sample(alphabet, 1)[0]\n return so_far", "def setseq():\n\n if seqRnd[0]:\n r = getrndseq(seqRndWeight)\n if seqRnd[0] == 1:\n gbl.seqCount = r\n r = -1\n else:\n r = -1\n\n return ( r, seqRnd[1:] )", "def getRandom(self) -> int:", "def getRandom(self) -> int:", "def gen_seq(self,ntrials=20,pm_trial_position=None):\n # insert ranomly positioned pm trials\n if 
type(pm_trial_position)==type(None):\n ntrials -= 1+self.num_pm_trials\n pm_trial_position = np.random.randint(self.min_start_trials,ntrials,self.num_pm_trials) \n else:\n ntrials -= 1+len(pm_trial_position)\n pm_trial_position = pm_trial_position\n # generate og stim\n seq = np.random.randint(0,self.ntokens_og,ntrials)\n X = np.insert(seq,[0,*pm_trial_position],self.pm_token)\n # form Y \n Xroll = np.roll(X,self.nback)\n Y = (X == Xroll).astype(int) # nback trials\n Y[X==self.pm_token]=2 # pm trials\n return X,Y", "def generate_sequence(seq_len, query_distribution):\n\n np.random.seed()\n\n #normailze the frequencies to form a distribution\n query_ids, distribution = zip(*query_distribution)\n distribution /= sum(np.array(distribution))\n\n return np.random.choice(query_ids, size=seq_len,\n replace=True, p=distribution)", "def rseq(start=0.0, stop=1.0, N=10, randomness=0.5):\n\n return (randomness * sort(start + (stop - start) * rand(N))\n + (1 - randomness) * frange(start, stop, npts=N))", "def random_values():\n while True:\n yield random()", "def generate_seq(self):\n\n # Variable initialization\n eos = False\n c_s = 99\n x = []\n y = []\n\n while not eos:\n\n # Start of sequence\n if c_s == 99:\n # Sample from initial\n c_s = self.sample_p(self.proba[\"initial\"])\n\n # Consecutive iterations\n\n # We generate until we get length of self length\n elif len(x) < self.length:\n # Sample from transition of last state\n c_s = self.sample_p(self.proba[\"transition\"][c_s])\n\n # Generate emission\n\n # Note that we append the states as labels and observations as input\n y.append(c_s)\n x.append(self.sample_p(self.proba[\"emission\"][c_s]))\n\n else:\n eos = True\n\n # We get the state ID by offseting their idx by the length of observations\n ofs = len(self.obs)\n y = [i + ofs for i in y]\n return (x, y)", "def generate_numbers():\n\n return random.sample(range(100), 10)", "def generate(self) -> List[str]:\n\n self._reset()\n\n res = self._get_interactions(\n random.randint(self._min_seq_len, self._max_seq_len))\n\n self._add_guarded_first_named_alloc(res)\n\n if random.randint(0, 1):\n # Add some noise between source and destination\n # Is this helpful? 
Why?\n noise = self._get_interactions(\n random.randint(self._min_intervening_len,\n self._max_intervening_len))\n res.extend(noise)\n\n res.append(self._get_second_named_alloc())\n\n return stringify_sequence(res)", "def random(n: int) -> bytes:\n return os.urandom(n)", "def rand(self):\n return self.State.rand()", "def genNum(num, len):\n seed = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n for i in range(num):\n print ''.join(random.sample(seed, len))", "def random(self):\r\n return random.randint(1, 4)", "def totem_random():\n random_head()\n random_head()\n random_head()", "def code_generator():\n digits = [str(num) for num in range(10)]\n random.shuffle(digits)\n return digits[:3]", "def random():\n np.random.seed(1939)", "def rand_elem(seq, n=None):\n return map(random.choice, repeat(seq, n) if n is not None else repeat(seq))", "def randomSub(seed: float):\n crc = str(string.ascii_letters + string.digits)\n random.seed(seed)\n n = random.randint(10,30)\n return \"\".join(random.sample(crc, n))", "def _gen_random_number() -> float:\n return uniform(0, 1000)", "def getRandomSequence(seqLength):\n nucleotides = (\"A\", \"C\", \"G\", \"T\")\n seq = \"\"\n for i in range(seqLength):\n seq += random.choice(nucleotides)\n \n dictionary = {\"description\": \"Random sequence | \" + str(seqLength) + \"bp\", \"type\": \"dna\", \"data\" : seq}\n \n return dictionary", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def generate_code(self):\n seeds = \"1234567890\"\n random_str = []\n for i in range(4):\n random_str.append(choice(seeds))\n\n return \"\".join(random_str)", "def generate_seed():\n global seed\n seed = []\n\n for char_id in range(0, len(printable)):\n while True:\n char_sequence = [printable[randint(0, len(printable)-1)], printable[randint(0, len(printable)-1)]]\n if char_sequence not in seed:\n break\n seed.append(char_sequence)", "def generate_random_rot():\n from pyso3.quaternion import quat2rot\n import numpy as np\n q = np.random.randn(4)\n q = q / np.linalg.norm(q)\n return quat2rot(q)", "def generate_one_sample(dimension, sequence_length, repeat_times):\n # produce random sequence\n sequence = np.random.binomial(\n 1, 0.5, (sequence_length, dimension - 1)).astype(np.uint8)\n\n # allocate space for input sequence and output sequence\n input_sequence = np.zeros(\n (sequence_length + 1 + sequence_length * repeat_times, # + 1\n dimension),\n dtype=np.bool)\n output_sequence = np.zeros(\n (sequence_length + 1 + sequence_length * repeat_times, # + 1\n dimension),\n dtype=np.bool)\n\n # set value of input sequence\n input_sequence[:sequence_length, :-1] = sequence\n # input_sequence[sequence_length, -1] = repeat_times\n input_sequence[sequence_length, -1] = 1\n\n # set value of output sequence ## sequence_length + 1\n output_sequence[sequence_length+1:, :-1] = \\\n np.tile(sequence, (repeat_times, 1))\n # \"1\": A special flag which indicate the begin of the output\n # output_sequence[sequence_length, -1] = 1\n\n # return the sample\n return input_sequence, output_sequence", "def getRandom(self):\n return self.nums[randint(0, len(self.nums)-1)]", "def get_random(self,num):\n return ''.join(sample('abcdefghijklmnopqrstuvwxyz1234567890!',8))", "def random_number_generator(arg1, arg2):\n return 42", "def random(cls):\n return cls(os.urandom(32))", "def random_number(length=6):\n return randint(10**(length-1), 
(10**(length)-1))", "def random():\n return constant(1)", "def generate_random_numbers(self):\r\n #random.seed(seed=self.seed)\r\n #err = random.random((3,1))\r\n #f = open('test_res', 'a')\r\n #f.write('probability - %s' %self.seed)\r\n #f.write(str(list(err[:3,:])))\r\n #f.write('\\n')\r\n #f.close()\r\n\r\n dist = RandomDistribution(self.seed)\r\n rand_numbers = dist.return_random_variables(self.num_agents)\r\n return rand_numbers", "def i_rand_a():\n return i_random() % 95 + 32", "def generate_rng(nrngs, startseed=None):\n start_rng = np.random.RandomState(startseed)\n for i in range(nrngs):\n yield np.random.RandomState(start_rng.randint(2**32))", "def rand_gen(below, baseline):\n\treturn secrets.randbelow(below)/ baseline", "def randomNumberGenerator(self):\n #infinite loop of magical random numbers\n print(\"Making random numbers\")\n while not thread_stop_event.isSet():\n number = random.randint(10000,99999)\n print(number)\n socketio.emit('newQrCode', str(number), namespace='/test')\n time.sleep(5)", "def random():\n np.random.seed(0)", "def _generate_seq(sn):\n a, b = 0, 1\n for i in range(sn):\n yield str(a) + ' '\n a, b = b, a+b", "def get_random_sequence(genome):\n \n chr_list = get_chromosome_length(genome)\n \n random_seq = {}\n chr = random.sample(chr_list.keys(),1) #select chromosome\n slen = random.randint(300,1000) #select sequence length\n if chr_list[chr[0]] - slen > 0:\n spos = random.randint(1,chr_list[chr[0]] - slen) #select start position\n \n seq = get_fragment(genome, chr[0], slen, spos)\n if seq.count(\"N\") > 0.1 * slen:\n seq = get_random_sequence(genome)\n else:\n seq = get_random_sequence(genome)\n \n return seq", "def generator(self, args, gen):\n import random\n\n if args.seed:\n random.seed(args.seed)\n seqs = [s for s in gen]\n sample_indices = random.sample(range(len(seqs)), min(len(seqs), args.number))\n for i in sample_indices:\n yield seqs[i]", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def genRandom(self, bits):\n _rand = 0\n _bytes = bits // 8 + 8\n\n while(len(bin(_rand))-2 < bits):\n\n try:\n _rand = int.from_bytes(random_function(_bytes), byteorder='big')\n except:\n _rand = int(random_function(_bytes).encode('hex'), 16)\n\n return _rand", "def rand(self): # Method doctring\n\n self._last_rand = xorshift32(self._last_rand, self.triple)\n return self._last_rand", "def generateRandomString():\n return ''.join(b64encode(urandom(32)).decode('utf-8'))", "def makeChrom(length):\n output = []\n for i in range(length):\n output.append(randrange(14))\n return output", "def getRandom(self) -> int:\n count = len(self.arr)\n return self.arr[randint(0, count-1)]", "def random_body():\n extra = random.randint(1, 9)\n sequence = (hex(rand32()) for _ in range(extra))\n return hex(id_) + '\\n' + '\\n'.join(sequence)", "def generator(self, random, args):\r\n raise NotImplementedError", "def random_keys(self):\n while True:\n yield self.generator.str()", "def i_random():\n global randrsl, randcnt\n\n r = randrsl[randcnt]\n randcnt += 1\n if (randcnt > 255):\n isaac_()\n randcnt = 0\n\n return r", "def gensalt():\n return hexlify(os.urandom(24)).decode()", "def genKey(length=32):\r\n return os.urandom(length)", "def sample(self):\n seq = []\n for i in range(self._pwm.shape[1]):\n p = numpy.array(self._pwm[:, i], dtype=numpy.float64)\n p /= p.sum()\n seq.extend(numpy.random.choice(self.alphabet, p=p))\n return \"\".join(seq)", "def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def genRandString(dl = 10):\n ret 
= ''\n for i in range(dl) :\n ret += random.choice(string.ascii_letters + string.digits)\n return ret", "def gen_rand(l):\n w = int(l / 2)\n\n min = (1 << (w - 1)) | 1\n max = (1 << w) - 1\n\n n = random.randrange(min, max) | 1\n\n return n", "def getRandom(self) -> int:\n steps = random.randint(0, self.len-1) # 随机抽取一个\n temp = self.head\n for i in range(steps):\n temp=temp.next\n return temp.val", "def random_num(self):\r\n self.generate_n1()\r\n self.generate_n2()\r\n self.generate_n3()\r\n self.generate_n4()\r\n random_number = str(self.n_1decimal)+str(self.n_2decimal)+str(self.n_3decimal)+str(self.n_4decimal)\r\n print int(random_number)", "def getRandomAngle():\r\n\treturn random.random() * math.pi * 2", "def seed_random(max_integer):\n return random.randrange(0,max_integer);", "def generator(self, random, args):\n\t\traise NotImplementedError", "def rand_ident():\n return random.randrange(MAX_IDENT)", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def _random_issn():\n first = randint(1000, 9999)\n second = randint(100, 999)\n return str(first) + \"-\" + str(second) + str(_select_from([1, 2, 3, 4, 5, 6, 7, 8, 9, \"X\"]))", "def generate(self):\n node = self.generate_random()\n\n while True:\n yield node.state[-1]\n if len(node.next_states) != 0:\n node = node.get_next_state()\n if node == None:\n node = self.generate_random()\n while len(node.next_states) == 0:\n node = self.generate_random()\n else:\n node = self.generate_random()\n while len(node.next_states) == 0:\n node = self.generate_random()", "def generateSequenceBias(self, bias):\n\n if bias < 0 or bias > 1:\n raise ValueError(\"Bias must be a value between 0 and 1.\")\n else:\n for i in range(self.length):\n self.sequence.append(0 if random.random() < bias else 1)\n self.biasSeq = 1\n self.bias = bias", "def random_generator(nurses_number: int = 10):\n\n # For each possible shift of all the nurses, is generated randomly a value to define as allocated or not\n state = ''\n\n # The range goes from 0 to 21*nurses_number. 
This happens because we every time have 21 shifts to n nurses\n for i in range(0, 21 * nurses_number):\n state = state + str(randrange(0, 2))\n\n # Return the new state generated\n return state", "def _generate_string_seq():\n input_word_num = random.randint(1, config.MAX_INPUT_WORD_NUMBER)\n return ' '.join(resources.get_random_words(input_word_num))", "def __generate_random_gene_sequence(self):\n genes = []\n for j in range(self.chromosome_size):\n genes.append(random.choice(self.gene_pool))\n\n return genes", "def my_random(a):\r\n import random\r\n r = random.randint(0, 100)\r\n return a + r", "def make_repeatable():\n random.seed(1234)\n np.random.seed(1234)", "def gen_random(\n l: list,\n n: int,\n seed: int = None\n ) -> str:\n\n # Initialisations\n s = \"\"\n\n # Loop for the desired length of the string\n for i in range(0, n):\n\n if seed is not None:\n\n numpy.random.seed(seed + i)\n\n # Append the next random character\n s += numpy.random.choice(l)\n\n return s", "def generate_custom_sequence(program, pass_space=DEFAULT_GENE_POOL,\n debug=False):\n global print_out\n print_out = debug\n return simulate_generations(pass_space, program)", "def randomSeq(n, a, b):\n \n return [\n Complex(a + np.random.random()*(b-a), a + np.random.random()*(b-a))\n for _ in range(n)\n ]", "def random_sequence(amount=100, start=0, stop=0, reverse=False):\n \n sequence = []\n if start == stop:\n for i in range(amount):\n sequence.append(stop)\n elif start < stop and not reverse: \n for i in range(amount):\n number = random.randrange(start, stop)\n sequence.append(number)\n sequence.sort()\n elif start < stop and reverse: \n for i in range(amount):\n number = random.randrange(start, stop)\n sequence.append(number)\n sequence.sort(reverse=True)\n\n elif start > stop and not reverse: \n for i in range(amount):\n number = random.randrange(stop, start)\n sequence.append(number)\n sequence.sort()\n elif start > stop and reverse: \n for i in range(amount):\n number = random.randrange(stop, start)\n sequence.append(number)\n sequence.sort(reverse=True)\n return sequence", "def getRandom(self) -> int:\n return random.choice(tuple(self.l))", "def computer_generate(self):\n return choice[random.randrange(3)]", "def rand(lo=0, hi=1):\n global Seed\n Seed = (16807 * Seed) % 2147483647\n return lo + (hi - lo) * Seed / 2147483647", "def random_gene(self):\n size = random.randint(1,50)\n gene = \"\"\n for i in range(0,size,1):\n gene+=random.choice(self.instructions)\n return gene", "def shotgenerator():\n return random.randint(0, 9), random.randint(0, 9)" ]
[ "0.7697955", "0.7600561", "0.7551118", "0.75248724", "0.7125383", "0.7092031", "0.69861037", "0.69277364", "0.6886229", "0.6868949", "0.6833825", "0.6803206", "0.67367864", "0.67331696", "0.6730156", "0.6715812", "0.6699407", "0.66590357", "0.66281706", "0.66149676", "0.65995187", "0.65995187", "0.6595084", "0.65926003", "0.6589287", "0.6526456", "0.6507666", "0.6493813", "0.64916354", "0.64704496", "0.64284474", "0.6412195", "0.6389472", "0.6383682", "0.6376732", "0.63756526", "0.63748145", "0.6361774", "0.6359661", "0.6349557", "0.6347794", "0.6343306", "0.63256556", "0.6319575", "0.6316664", "0.6297366", "0.62889344", "0.6288031", "0.62869245", "0.6272077", "0.6254106", "0.62495464", "0.62472504", "0.62420136", "0.6236023", "0.62344503", "0.62164193", "0.6213061", "0.62045175", "0.61961657", "0.6188359", "0.6182299", "0.6170367", "0.61689585", "0.61681896", "0.6154074", "0.6146085", "0.6143121", "0.6138793", "0.6134056", "0.6122844", "0.61178416", "0.6107172", "0.61066616", "0.61066616", "0.6104341", "0.61021745", "0.60951346", "0.6094966", "0.609262", "0.6077214", "0.60750395", "0.6070348", "0.60690355", "0.6063439", "0.6060368", "0.60598695", "0.6059355", "0.6054469", "0.60532415", "0.60517377", "0.6040376", "0.6034677", "0.60313475", "0.60302454", "0.60285693", "0.60275376", "0.60235995", "0.60178214", "0.6015", "0.6013653" ]
0.0
-1
Retry calling the decorated function using an exponential backoff.
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck, e:
                    msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
                    if logger:
                        logger.warning(msg)
                    else:
                        print(msg)
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
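A minimal, self-contained sketch of how a decorator like the one above is typically applied (illustrative only): it restates the same backoff loop with Python 3 "except ... as e" syntax and wraps a hypothetical flaky_fetch() call; the function name, the choice of ConnectionError, and the simulated failure rate are assumptions made for this example, not part of the original snippet.

import time
import random
from functools import wraps

def retry(exception_to_check, tries=4, delay=3, backoff=2, logger=None):
    """Retry the wrapped callable, doubling the wait after each failure."""
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except exception_to_check as e:
                    msg = "%s, retrying in %d seconds..." % (e, mdelay)
                    (logger.warning if logger else print)(msg)
                    time.sleep(mdelay)
                    mtries -= 1        # one attempt consumed
                    mdelay *= backoff  # exponential backoff: wait grows each round
            return f(*args, **kwargs)  # final attempt; any exception now propagates
        return f_retry
    return deco_retry

@retry(ConnectionError, tries=4, delay=1, backoff=2)
def flaky_fetch():
    # Hypothetical call that fails about two thirds of the time.
    if random.random() < 0.66:
        raise ConnectionError("simulated transient failure")
    return "payload"

if __name__ == "__main__":
    print(flaky_fetch())  # sleeps 1s, 2s, 4s between failed attempts before the final try

With tries=4 and backoff=2 the wrapped call is attempted at most four times, the waits between attempts growing as 1s, 2s, 4s; only the last failure reaches the caller.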
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def retry(tries, delay=3, backoff=2, except_on=(Exception, )):\n\n tries = math.floor(tries)\n\n def decorator(f):\n def f_retry(*args, **kwargs):\n return function_retry(\n tries, delay, backoff, except_on, f, *args, **kwargs)\n return f_retry # true decorator -> decorated function\n return decorator # @retry(arg[, ...]) -> true decorator", "def retry(func):\n # ... retry MAX_RETRIES times\n # ...\n # make sure you include this for testing:\n # except Exception as exc:\n # print(exc)\n # ...\n # and use wraps to preserve docstring\n #\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n tries = MAX_RETRIES\n while tries > 0:\n try:\n return func(*args, **kwargs)\n except Exception as err:\n print(err)\n\n tries -= 1\n\n raise MaxRetriesException\n\n return wrapper", "def _Retry(func, *args, **kwargs):\n retries = _RETRIES\n while True:\n try:\n return func(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n retries -= 1\n if retries > 0:\n log.info('Exception {e} thrown in {func}. Retrying.'.format(\n e=e, func=func.__name__))\n time.sleep(1)\n else:\n raise e", "def retry(exception, tries=10, delay=1, backoff=2, max_delay=30):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n m_tries, m_delay = tries, delay\n while m_tries > 1:\n try:\n return f(*args, **kwargs)\n except exception:\n time.sleep(min(m_delay, max_delay))\n m_tries -= 1\n m_delay *= backoff\n return f(*args, **kwargs)\n return f_retry # true decorator\n return deco_retry", "def auto_retry(fun):\n\n @functools.wraps(fun)\n def decorated(instance, *args, **kwargs):\n \"\"\"Wrapper around a decorated function.\"\"\"\n cfg = instance._retry_config\n remaining_tries = cfg.retry_attempts\n current_wait = cfg.retry_wait\n retry_backoff = cfg.retry_backoff\n last_error = None\n\n while remaining_tries >= 0:\n try:\n return fun(instance, *args, **kwargs)\n except socket.error as e:\n last_error = e\n instance._retry_logger.warning('Connection failed: %s', e)\n\n remaining_tries -= 1\n if remaining_tries == 0:\n # Last attempt\n break\n\n # Wait a bit\n time.sleep(current_wait)\n current_wait *= retry_backoff\n\n # All attempts failed, let's raise the last error.\n raise last_error\n\n return decorated", "def decorated(instance, *args, **kwargs):\n cfg = instance._retry_config\n remaining_tries = cfg.retry_attempts\n current_wait = cfg.retry_wait\n retry_backoff = cfg.retry_backoff\n last_error = None\n\n while remaining_tries >= 0:\n try:\n return fun(instance, *args, **kwargs)\n except socket.error as e:\n last_error = e\n instance._retry_logger.warning('Connection failed: %s', e)\n\n remaining_tries -= 1\n if remaining_tries == 0:\n # Last attempt\n break\n\n # Wait a bit\n time.sleep(current_wait)\n current_wait *= retry_backoff\n\n # All attempts failed, let's raise the last error.\n raise last_error", "def _retry_provider_call(self, func):\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n max_retries = 29\n attempts = 0\n while attempts < max_retries:\n try:\n return func(*args, **kwargs)\n except ClientError as e:\n attempts += 1\n raise RetryLimitExceededError(\n \"Exceeded request limit {} times. 
Aborting.\".format(max_retries)\n )\n return decorated", "def retrying(func, *retry_args, **retry_kwargs):\n yield retriable(*retry_args, **retry_kwargs)(func)", "def retry(nattempts, exception=None):\n \n def tryIt(func):\n def wrapper(*args, **kwargs):\n attempts = 0\n while attempts < nattempts - 1:\n try:\n return func(*args, **kwargs)\n except (exception if exception is not None else Exception):\n attempts += 1\n return func(*args, **kwargs)\n return wrapper\n return tryIt", "def retry(retries=5):\n\n def decorator(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n count = 0\n while True:\n try:\n return fn(*args, **kwargs)\n except (\n BadRequest,\n BadResponseException,\n ReadTimeout,\n RequestException,\n TraktBadGateway,\n TraktUnavailable,\n TraktInternalException,\n ) as e:\n if count == retries:\n logger.error(f\"Error: {e}\")\n\n if isinstance(e, BadResponseException):\n logger.error(f\"Details: {e.details}\")\n if isinstance(e, TraktInternalException):\n logger.error(f\"Error message: {e.error_message}\")\n\n logger.error(\n \"API didn't respond properly, script will abort now. Please try again later.\"\n )\n logger.error(\n f\"Last call: {fn.__module__}.{fn.__name__}({args[1:]}, {kwargs})\"\n )\n exit(1)\n\n seconds = 1 + count\n count += 1\n logger.warning(\n f\"{e} for {fn.__module__}.{fn.__name__}(), retrying after {seconds} seconds (try: {count}/{retries})\"\n )\n sleep(seconds)\n\n return wrapper\n\n return decorator", "def retrying(cls, fn, retries_allowed=None, wait_seconds=None, wait_increment=None, wait_multiplier=None):\n # A special name_key of 'anonymous' is the default, which causes there not to be a name key.\n # This cannot work in conjunction with RetryManager because different calls may result in different\n # function values at the same point in code. -kmp 8-Jul-2020\n decorator_function = Retry.retry_allowed(\n name_key='anonymous', retries_allowed=retries_allowed, wait_seconds=wait_seconds,\n wait_increment=wait_increment, wait_multiplier=wait_multiplier\n )\n return decorator_function(fn)", "def _retry(func):\n @wraps(func)\n def _retry_wrapper(self, *args, **kwargs):\n error_message = \"\"\n for retry in range(self.retries + 1):\n try:\n return func(self, *args, **kwargs)\n except ValueError as err:\n error_message = str(err)\n raise ValueError(str(error_message))\n return _retry_wrapper", "def retry_task(func):\n\n @wraps(func)\n def wrapper(task, *args, **kwargs):\n retries = task.request.retries\n exponential = 2 ** retries\n exponential_backoff = random.randint(exponential, exponential * 2)\n try:\n result = func(task, *args, **kwargs)\n except Exception as e:\n logger.error(\n f\"Retriying {task.request.id} after {exponential_backoff} seconds\"\n )\n raise task.retry(countdown=exponential_backoff, exc=e, max_retries=5)\n\n return result\n\n return wrapper", "def retry(func, *args, **kwargs):\n\n # config\n backoff = 1. 
+ random.random() * 0.1\n max_backoff = 32\n max_retries = 5\n\n # try to make the request\n for i in range(max_retries):\n try:\n # return on success\n return func(*args, **kwargs)\n except Exception:\n # sleep on failure\n time.sleep(backoff)\n backoff = 2 * backoff if backoff < max_backoff else backoff\n \n # max retries exceeded\n raise RuntimeError('The connection to the server timed out.')", "def retry(exceptions=Exception, tries=3, delay=1):\n\n def retry_decorator(func):\n def func_wrapper(*args, **kwargs):\n _tries = tries\n while _tries:\n try:\n return func(*args, **kwargs)\n except exceptions as e:\n _tries -= 1\n if not _tries:\n raise\n\n time.sleep(delay)\n\n return func_wrapper\n\n return retry_decorator", "def retry(exception, tries=10, delay=3, backoff=0.1):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exception as ex:\n print \"{0}, Retrying in {1} seconds...\".format(ex, mdelay)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n return f_retry # true decorator\n return deco_retry", "def retryable(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n retries = 0\n max_retries = kwargs.get(\"max_retries\", DEFAULT_RETRIES)\n backoff = kwargs.get(\"backoff\", default_backoff)\n while retries <= max_retries:\n try:\n return func(*args, **kwargs)\n except IntegrityError:\n logging.debug(\n \"Race-condition caught? ({}/{} retries)\".format(retries, max_retries)\n )\n if retries >= max_retries:\n logging.error(f\"Unable to execute {func}, max retries exceeded\")\n raise\n retries += 1\n backoff(retries, max_retries)\n\n return wrapper", "def retry(tries, delay=3, backoff=2):\n tries = math.floor(tries)\n if tries < 0:\n raise ValueError(\"tries must be 0 or greater\")\n\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay # make mutable\n err = None\n while mtries > 0:\n print(\"Trial Number:\" + str(mtries))\n try:\n rv = f(*args, **kwargs)\n except DBException as e:\n print(\"Retry..\")\n mtries -= 1 # consume an attempt\n time.sleep(mdelay) # wait...\n mdelay += backoff # make future wait longer\n err = e\n\n # except Exception as e:\n # print(str(e))\n # mtries -= 1 # consume an attempt\n # time.sleep(mdelay) # wait...\n # mdelay += backoff # make future wait longer\n # err = e\n else:\n return rv\n raise err\n\n return f_retry # true decorator -> decorated function\n\n return deco_retry # @retry(arg[, ...]) -> true decorator", "def _retry(method, max_tries=5, backoff_s=1):\n\n @wraps(method)\n def method_with_retries(self, *args, **kwargs):\n try_count = 0\n while try_count < max_tries:\n try:\n return method(self, *args, **kwargs)\n except BrokenPipeError:\n logger.warning(\"Caught a BrokenPipeError. 
Retrying.\")\n try_count += 1\n if try_count < max_tries:\n self._construct_clients()\n time.sleep(backoff_s)\n else:\n raise\n\n return method_with_retries", "def retry(maxRetries, *exceptions):\n def _doDecoration(fn):\n def _doRetry(*args, **kwargs):\n retries = 0\n while retries <= maxRetries:\n try:\n return fn(*args, **kwargs)\n except tuple(exceptions):\n retries +=1\n if retries > maxRetries:\n raise\n \n return _doRetry\n return _doDecoration", "def wrap(fn):\n\n def wrapped_fn(*args, **kwargs):\n \"\"\"The actual wrapper function that applies the retry logic.\"\"\"\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)\n\n return wrapped_fn", "def url_socket_retry(func, *args, **kw):\n min_delay = 1\n max_delay = 32\n max_attempts = 4\n\n for idx, delay in enumerate(\n backoff_delays(min_delay, max_delay, jitter=True)):\n try:\n return func(*args, **kw)\n except HTTPError as err:\n if not (err.status == 503 and 'Slow Down' in err.reason):\n raise\n if idx == max_attempts - 1:\n raise\n except URLError as err:\n if not isinstance(err.reason, socket.error):\n raise\n if err.reason.errno not in (104, 110):\n raise\n if idx == max_attempts - 1:\n raise\n\n time.sleep(delay)", "def retry(\n self, n: int, /, *args, error: Catchable = Exception, sleep=None, **kwargs\n ) -> \"fn\":\n\n func = self._mod.retry(n, self, error=error, sleep=sleep)\n return func(*args, **kwargs)", "def retry(times: int, on_exceptions: List[Exception]):\n def decorator(function: Callable):\n @wraps(function)\n def wrapper(*args, **kwargs):\n raised = []\n for _ in range(times):\n try:\n return function(*args, **kwargs)\n except Exception as ex:\n raised.append(ex)\n if type(ex) not in on_exceptions:\n raise RetryError(\n 'An unexpected error occurred while calling the function '+\n f'{function.__name__}.'\n ) from ex\n raise raised.pop()\n return wrapper\n return decorator", "def retry(func, *args, **kwargs):\n @functools.wraps(func)\n def wrapper(*w_args, **w_kwargs):\n w_kwargs.update(kwargs)\n return retry_function_on_deadlock(func, *w_args, **w_kwargs)\n\n return wrapper", "def retry_multi(max_retries=5):\n\n def retry(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n num_retries = 0\n ret = None\n while num_retries <= max_retries:\n try:\n ret = func(*args, **kwargs)\n break\n except Exception as e:\n logger.exception(e)\n if num_retries == max_retries:\n raise\n num_retries += 1\n time.sleep(5)\n return ret\n\n return wrapper\n\n return retry", "def retry(ExceptionToCheck, tries=3, delay=3, backoff=2):\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n logging.warning('%s, Retrying in %d seconds...', str(e), mdelay)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry\n\n return deco_retry", "def test_retry(self):\n retries = [0]\n max_tries = 5\n\n @retry(Exception, max_retries=5)\n def f():\n retries[0] += 1\n raise Exception(\"Faulty function\")\n\n with self.assertRaises(Exception):\n f()\n\n self.assertEqual(max_tries, retries[0])", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n\tdef deco_retry(f):\n\t\t@wraps(f)\n\t\tdef f_retry(*args, **kwargs):\n\t\t\tmtries, mdelay = tries, delay\n\t\t\twhile 
mtries > 1:\n\t\t\t\ttry:\n\t\t\t\t\treturn f(*args, **kwargs)\n\t\t\t\texcept ExceptionToCheck, e:\n\t\t\t\t\tmsg = \"func: '{}' > exc: {}, Retrying in {} seconds...\".format(str(f.__name__), str(e), mdelay)\n\t\t\t\t\tif logger:\n\t\t\t\t\t\tlogger.warning(msg)\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint msg\n\t\t\t\t\ttime.sleep(mdelay)\n\t\t\t\t\tmtries -= 1\n\t\t\t\t\tmdelay *= backoff\n\t\t\treturn f(*args, **kwargs)\n\t\treturn f_retry\t# true decorator\n\treturn deco_retry", "def retry(attempts_number, delay=0, step=0, max_delay=-1,\n retry_on=Exception, logger=None):\n\n def decorator(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n current_logger = logger\n\n attempts = 1\n retry_delay = delay\n\n try:\n if isinstance(args[0], object):\n current_logger = args[0].get_logger()\n except (AttributeError, IndexError):\n pass\n\n if isinstance(retry_on, (types.FunctionType,\n types.MethodType,)):\n catch_strategy = CatchFunctionStrategy(retry_on)\n else:\n catch_strategy = CatchExceptionStrategy(retry_on)\n\n while attempts <= attempts_number or attempts_number < 0:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if catch_strategy.need_to_retry(e):\n if attempts >= attempts_number >= 0:\n raise\n elif current_logger:\n retry_count = \"inf\" if attempts_number < 0 \\\n else attempts_number - 1\n\n current_logger.warning(\n \"Retry: Call to %(fn)s failed due to \"\n \"%(exc_class)s: %(exc)s, retry \"\n \"attempt #%(retry_no)s/\"\n \"%(retry_count)s after %(delay)ss\",\n dict(fn=func.__name__,\n exc=str(e),\n retry_no=attempts,\n exc_class=e.__class__.__name__,\n retry_count=retry_count,\n delay=retry_delay))\n time.sleep(retry_delay)\n attempts += 1\n retry_delay += step\n if 0 <= max_delay < retry_delay:\n retry_delay = max_delay\n else:\n raise\n return wrapper\n return decorator", "def test_retry_raises_error_on_negative_retries(self):\n\n @retry(Exception, max_retries=-1)\n def f():\n raise Exception(\"Faulty function\")\n\n self.assertRaises(ValueError, f)", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print msg\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(ExceptionToCheck, tries=3, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print msg\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(exceptions, tries=3, delay=2, _logger=logger()):\n\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exceptions as e:\n msg = '{}, Retrying in {} seconds...'.format(e, mdelay)\n _logger.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, 
backoff=2, logger=None):\n def deco_retry(f):\n\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except ExceptionToCheck as e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry # true decorator\n\n return deco_retry", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\r\n def deco_retry(f):\r\n\r\n @wraps(f)\r\n def f_retry(*args, **kwargs):\r\n mtries, mdelay = tries, delay\r\n while mtries > 1:\r\n try:\r\n return f(*args, **kwargs)\r\n except ExceptionToCheck, e:\r\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\r\n if logger:\r\n logger.warning(msg)\r\n else:\r\n print msg\r\n time.sleep(mdelay)\r\n mtries -= 1\r\n mdelay *= backoff\r\n return f(*args, **kwargs)\r\n\r\n return f_retry # true decorator\r\n\r\n return deco_retry", "def _retry_refresh(wrapper, *a3, **k3):\n return func(wrapper, *a3, **k3)", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\r\n def deco_retry(f):\r\n\r\n @wraps(f)\r\n def f_retry(*args, **kwargs):\r\n mtries, mdelay = tries, delay\r\n while mtries > 1:\r\n try:\r\n return f(*args, **kwargs)\r\n except ExceptionToCheck as e:\r\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\r\n if logger:\r\n logger.warning(msg)\r\n else:\r\n print (msg)\r\n time.sleep(mdelay)\r\n mtries -= 1\r\n mdelay *= backoff\r\n return f(*args, **kwargs)\r\n return f_retry # true decorator\r\n return deco_retry", "def exp_backoff_fn(fn, *args):\n if not on_win:\n return fn(*args)\n\n import time\n import errno\n max_tries = 6 # max total time = 6.4 sec\n for n in range(max_tries):\n try:\n result = fn(*args)\n except (OSError, IOError) as e:\n if e.errno in (errno.EPERM, errno.EACCES):\n if n == max_tries - 1:\n raise Exception(\"max_tries=%d reached\" % max_tries)\n time.sleep(0.1 * (2 ** n))\n else:\n raise e\n else:\n return result", "def retryCall(fn, args=None, keywordArgs=None, failureTester=None, sleepManager=None):\n sleepManager = sleepManager or time.SleepManager()\n while True:\n try:\n result = yield fn(*args, **keywordArgs)\n defer.returnValue(result)\n except Exception: # pylint: disable=W0703\n failureTester(failure.Failure())\n yield sleepManager.sleep()", "def retry(callback, retries, sleep=0.5, catch=Exception, *args, **kwargs):\n r = 0\n while r < retries:\n r += 1\n try:\n return callback(*args, **kwargs)\n except catch as c:\n if r == retries:\n raise c\n else:\n time.sleep(r * sleep)", "def retry(retry_times=3, interval=0.5, exceptions=Exception):\n def _decorator(func):\n @wraps(func)\n def _wrapped_func(*args, **kwargs):\n for attempt in range(1, retry_times + 1):\n try:\n return func(*args, **kwargs)\n except exceptions: # pylint: disable=broad-except\n if attempt < retry_times:\n logger.debug(\"%s failed in No. %d attempt\", func, attempt)\n import traceback\n import time\n logger.debug(traceback.format_exc())\n time.sleep(interval)\n else:\n raise # End of retry. 
Re-raise the exception as-is.\n return _wrapped_func\n return _decorator", "def default_backoff(retries, max_retries):\n\n time.sleep(random.random() * (max_retries - retries) / max_retries * 2)", "def smart_retry(f):\n # type: (Callable) -> CallableT\n\n @functools.wraps(f)\n def wrapper(api_instance, *args, **kwargs):\n # type: (UnifiAPI, *Any, **Any) -> Any\n try:\n return f(api_instance, *args, **kwargs)\n except Unauthorized as e:\n\n api_instance.log.debug(\n \"An exception occurred when executing %s: %s. Refreshing the connection to the Controller and retrying\",\n f.__name__,\n e,\n )\n api_instance.connect()\n return f(api_instance, *args, **kwargs)\n\n except Exception:\n raise\n\n return cast(CallableT, wrapper)", "def retriable(*retry_args, **retry_kwargs):\n def _retriable_factory(func):\n @wraps(func)\n def _retriable_wrapper(*args, **kwargs):\n return retry(func, args=args, kwargs=kwargs, *retry_args,\n **retry_kwargs)\n return _retriable_wrapper\n return _retriable_factory", "def _timeout_retry(func, *args, **kwargs):\n tried = kwargs.pop('_____retires', 0)\n try:\n q = func(*args, **kwargs)\n except (TimeoutError, TableParseError) as exc:\n if tried >= MAX_RETRIES_TIMEOUT:\n raise TimeoutError(f'TimeOut obtained in {MAX_RETRIES_TIMEOUT}'\n ' tries, aborting.') from exc\n return _timeout_retry(func, *args, **kwargs, _____retires=tried+1)\n return q", "def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n try_one_last_time = True\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n try_one_last_time = False\n break\n except ExceptionToCheck, e:\n if logger:\n msg = getMessage(\"en\", \"retrying-notification\").format(str(e), mdelay)\n logger.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n if try_one_last_time:\n return f(*args, **kwargs)\n return\n return f_retry # true decorator\n return deco_retry", "def wrapped_fn(*args, **kwargs):\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)", "def call_with_retries(function, max_retries=10,\n exception_types=(Exception),\n _args=(), _kwargs={}):\n assert max_retries >= 0\n\n retries = 0\n last_exc = Exception('Unknown exception')\n while retries <= max_retries:\n try:\n return function(*_args, **_kwargs)\n except exception_types as exc:\n retries += 1\n wait = 2.0 ** retries * 0.1 + (random.randint(0, 1000) / 1000)\n time.sleep(wait)\n last_exc = exc\n raise last_exc", "def retry(func, repeat=3, delay=tickTime * 2):\n\twhile repeat:\n\t\tresult = func()\n\n\t\tif result is None and delay and repeat != 1:\n\t\t\tsleep(delay)\n\n\t\telse:\n\t\t\treturn result\n\n\t\trepeat -= 1", "def execute_with_retry(f, args=[], kwargs={}, retry_on=(Exception,),\n max_tries=3, sleep=5):\n attempt = 0\n result = None\n while attempt < max_tries:\n attempt += 1\n try:\n result = f(*args, **kwargs)\n break\n except retry_on, e:\n if attempt >= max_tries:\n raise e\n log(\"Function call failed ('%s': %i/%i).\\n\"\n \"Reason: %s.\\n\"\n \"Wait for %i sec before retry...\"\n % (f.__name__, attempt, max_tries, str(e), sleep))\n time.sleep(sleep)\n return result", "def retry(exception_to_check, tries=4, delay=0.5, backoff=2):\n\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n try_one_last_time = 
True\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n try_one_last_time = False\n break\n except exception_to_check, e:\n msg = \"%s, Retrying in %d seconds...\" % (str(e), mdelay)\n logging.warning(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n if try_one_last_time:\n return f(*args, **kwargs)\n return\n return f_retry\n return deco_retry", "def retry_query(tries=3, delay=1):\n\n def retry_wrapper(func):\n \"\"\"Wrapper function.\n :params func: function to call\n :return: wrapper function\n \"\"\"\n\n @functools.wraps(func)\n def inner(*args, **kwargs):\n \"\"\"Inner wrapper function\n :params *args: list of different arguments\n *kwargs: dictionary of different arguments\n \"\"\"\n\n mtries = tries\n mdelay = delay\n\n while mtries:\n try:\n return func(*args, **kwargs)\n except Exception: # pylint: disable=broad-except\n if mtries:\n time.sleep(mdelay)\n mtries -= 1\n\n return inner\n\n return retry_wrapper", "def retry_call(\n callabl: Callable,\n args=None,\n kwargs=None,\n exceptions: Tuple[Any, ...] = (),\n retries: int = 10,\n wait: float = 0.1,\n) -> Any:\n\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n\n for attempt in range(1, retries + 1):\n try:\n return callabl(*args, **kwargs)\n except exceptions:\n if attempt < retries:\n time.sleep(wait)\n else:\n raise", "def retry_on_refuse(f, *args, **kwargs):\n i = 0\n while True:\n try:\n i += 1\n f(*args, **kwargs)\n break\n except (OSError, socket.error) as e:\n if e.args[0] != socket.errno.ECONNREFUSED or i > 10000:\n raise\n else:\n time.sleep(0.001)", "def retry_allowed(cls, name_key=None, retries_allowed=None, wait_seconds=None,\n wait_increment=None, wait_multiplier=None):\n\n def _decorator(function):\n function_name = name_key or function.__name__\n function_profile = cls.RetryOptions(\n retries_allowed=cls._defaulted(retries_allowed, cls.DEFAULT_RETRIES_ALLOWED),\n wait_seconds=cls._defaulted(wait_seconds, cls.DEFAULT_WAIT_SECONDS),\n wait_increment=cls._defaulted(wait_increment, cls.DEFAULT_WAIT_INCREMENT),\n wait_multiplier=cls._defaulted(wait_multiplier, cls.DEFAULT_WAIT_MULTIPLIER),\n )\n\n check_true(isinstance(retries_allowed, int) and retries_allowed >= 0,\n \"The retries_allowed must be a non-negative integer.\",\n error_class=ValueError)\n\n # See the 'retrying' method to understand what this is about. -kmp 8-Jul-2020\n if function_name != 'anonymous':\n cls._RETRY_OPTIONS_CATALOG[function_name] = function_profile # Only for debugging.\n\n @functools.wraps(function)\n def wrapped_function(*args, **kwargs):\n tries_allowed = function_profile.tries_allowed\n wait_seconds = function_profile.wait_seconds or 0\n last_error = None\n for i in range(tries_allowed):\n if i > 0:\n if i > 1:\n wait_seconds = function_profile.wait_adjustor(wait_seconds)\n if wait_seconds > 0:\n time.sleep(wait_seconds)\n try:\n success = function(*args, **kwargs)\n return success\n except Exception as e:\n last_error = e\n if last_error is not None:\n raise last_error\n\n return wrapped_function\n\n return _decorator", "def retry(exception_to_check=AssertionError, tries=100, delay=.1):\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 0:\n try:\n return f(*args, **kwargs)\n except exception_to_check, e:\n log.info('%s, Retrying in %s seconds...' 
% (str(e), mdelay))\n time.sleep(mdelay)\n mtries -= 1\n try_time = float(tries*delay)\n raise exception_to_check('tried for %1.1f seconds, gave up' % try_time)\n return f_retry\n return deco_retry", "def _RunWithRetries(self, callback, error_matcher):\n for i in xrange(FLAGS.gcloud_num_retries):\n try:\n return callback()\n except Exception as e: # pylint: disable=broad-except\n if not error_matcher(e):\n raise\n # Use randomized exponential backoff, like methods in\n # googleapiclient.http.\n retry_seconds = random.random() * 2**(i + 1)\n logging.warning('Request raised an error: %s\\n'\n 'Will retry in %f seconds.', e, retry_seconds)\n time.sleep(retry_seconds)\n\n return callback()", "def ensure_redis_call(f, *args, **kwargs):\n attempts = kwargs.pop('attempts', 5)\n\n for i in six.moves.range(attempts + 1):\n try:\n return f(*args, **kwargs)\n\n except (ConnectionError, TimeoutError) as e:\n if i == attempts:\n raise\n else:\n wait = 2 ** i\n msg = (\n 'Will reattempt to execute {} with args={} kwargs={} '\n 'after {} seconds due to exception {}: {}'\n ''.format(f, args, kwargs, wait, type(e).__name__, e)\n )\n print(msg)\n time.sleep(wait)", "def reprovision_and_retry(func):\n @functools.wraps(func)\n def wrapper(*a, **kw):\n errback = kw.get('errback', None)\n if errback is None:\n def errback(e):\n raise e\n def errback_wrapper(e):\n if isinstance(e, UnknownAppID) and 'INITIAL' in OPTIONS:\n try:\n for initial in OPTIONS['INITIAL']:\n provision(*initial) # retry provisioning the initial setup\n func(*a, **kw) # and try the function once more\n except Exception(new_exc):\n errback(new_exc) # throwing the new exception\n else:\n errback(e) # not an instance of UnknownAppID - nothing we can do here\n kw['errback'] = errback_wrapper\n return func(*a, **kw)\n return wrapper", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def _retry_occurred(self):", "def backoff(start_sleep_time=0.1, border_sleep_time=30, factor=2, jitter=True):\n if start_sleep_time < 0.001:\n logger.warning('start_sleep_time fewer than 0.001 and will be set to 0.001')\n start_sleep_time = 0.001\n\n def decorator(target):\n @wraps(target)\n def retry(*args, **kwargs):\n attempt = 0\n while True:\n sleep_time = _sleep_time(start_sleep_time, border_sleep_time, factor, attempt, jitter)\n try:\n attempt += 1\n sleep(sleep_time)\n ret = target(*args, **kwargs)\n except Exception as e:\n logger.error(f'Exception is catched {e}')\n logger.warning(f'Wait fo {sleep_time} seconds and try again')\n else:\n return ret\n return retry\n return decorator", "def call_with_retries(function, retry_count, retry_delay):\n logger.info(\"Calling function: %s with retry count: %s, retry_delay: %s\",\n function, retry_count, retry_delay)\n for retry in range(1, int(retry_count) + 1):\n logger.info(\"Attempt number: %s\", retry)\n try:\n return function()\n # pylint: disable=broad-except\n except Exception as verify_exception:\n logger.info(\"Verify exception: %s\", verify_exception)\n time.sleep(float(retry_delay))\n if retry > int(retry_count):\n logger.info(\"Exceeded max retries! 
Reraising last exception\")\n raise\n assert False, \"Should never get here.\"", "def db_transaction_retry_wrapper(fn):\n @ft.wraps(fn)\n def f(self, *args, **kwargs):\n backoffGenerator = util.backoffSecondsGenerator()\n try:\n while True:\n try:\n result = fn(self, *args, **kwargs)\n return result\n except exceptions_eligible_for_retry:\n waitInSeconds = backoffGenerator.next()\n try:\n self.logger.critical('server failure in db transaction - '\n 'retry in %s seconds',\n waitInSeconds)\n except AttributeError:\n pass\n try:\n self.responsiveSleep(waitInSeconds,\n 10,\n \"waiting for retry after failure in db \"\n \"transaction\")\n except AttributeError:\n time.sleep(waitInSeconds)\n except KeyboardInterrupt:\n return\n return f", "def _retry(*, task, signature_kwargs, retries):\n if retries < MAX_RETRIES:\n step = task.signature(**signature_kwargs)\n queue = step.options.get(\"queue\", task.queue)\n step.options[\"queue\"] = f\"{queue}-delay\"\n step.kwargs[\"retries\"] = retries + 1\n on_commit(step.apply_async)\n else:\n raise MaxRetriesExceededError", "def retry(\n action_type: ActionType,\n default_return: Any,\n) -> Callable:\n\n def decorator(func: Callable) -> Callable:\n @wraps(func)\n def result(*args: Any, **kwargs: Any) -> List[Optional[Row]]:\n func_delay = config.execution.TASK_RETRY_DELAY\n method_name = func.__name__\n self = args[0]\n\n raised_ex = None\n for attempt in range(config.execution.TASK_RETRY_COUNT + 1):\n try:\n # Create ExecutionLog with status in_processing or retrying\n if attempt:\n self.log_retry(action_type)\n else:\n self.log_start(action_type)\n # Run access or erasure request\n return func(*args, **kwargs)\n except BaseException as ex: # pylint: disable=W0703\n func_delay *= config.execution.TASK_RETRY_BACKOFF\n logger.warning(\n f\"Retrying {method_name} {self.traversal_node.address} in {func_delay} seconds...\"\n )\n sleep(func_delay)\n raised_ex = ex\n self.log_end(action_type, raised_ex)\n return default_return\n\n return result\n\n return decorator", "def retry(initial_delay,\n max_delay,\n factor=2.0,\n jitter=0.25,\n is_retriable=None):\n if factor < 1:\n raise ValueError('factor must be >= 1; was %f' % (factor,))\n\n if jitter >= 1:\n raise ValueError('jitter must be < 1; was %f' % (jitter,))\n\n # Generator to compute the individual delays\n def delays():\n delay = initial_delay\n while delay <= max_delay:\n yield delay * random.uniform(1 - jitter, 1 + jitter)\n delay *= factor\n\n def wrap(fn):\n \"\"\"Wrapper function factory invoked by decorator magic.\"\"\"\n\n def wrapped_fn(*args, **kwargs):\n \"\"\"The actual wrapper function that applies the retry logic.\"\"\"\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)\n\n return wrapped_fn\n\n return wrap", "def keep_run(exception_sleep=10):\n\n def decorated(fn):\n @wraps(fn)\n def wrapped(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except Exception as e:\n logging.exception(e)\n if exception_sleep > 0:\n time.sleep(exception_sleep)\n\n return wrapped\n\n return decorated", "def retry_after(self, delay: float, request_method: Callable, *args: Any, **kwargs: Any) -> 'NetworkResponse':\n raise NotImplementedError # pragma: no cover", "def _download_retry(self, product, wait, timeout):\n\n def decorator(download):\n def download_and_retry(*args, **kwargs):\n # initiate retry loop\n 
start_time = datetime.now()\n stop_time = start_time + timedelta(minutes=timeout)\n product.next_try = start_time\n retry_count = 0\n not_available_info = \"The product could not be downloaded\"\n # another output for notebooks\n nb_info = NotebookWidgets()\n\n while \"Loop until products download succeeds or timeout is reached\":\n\n datetime_now = datetime.now()\n\n if datetime_now >= product.next_try:\n product.next_try += timedelta(minutes=wait)\n try:\n return download(*args, **kwargs)\n\n except NotAvailableError as e:\n if not getattr(self.config, \"order_enabled\", False):\n raise NotAvailableError(\n f\"Product is not available for download and order is not supported for\"\n f\" {self.provider}, {e}\"\n )\n not_available_info = e\n pass\n\n if datetime_now >= product.next_try and datetime_now < stop_time:\n wait_seconds = (\n datetime_now - product.next_try + timedelta(minutes=wait)\n ).seconds\n retry_count += 1\n retry_info = (\n f\"[Retry #{retry_count}] Waited {wait_seconds}s, trying again to download ordered product\"\n f\" (retry every {wait}' for {timeout}')\"\n )\n logger.debug(not_available_info)\n # Retry-After info from Response header\n if hasattr(self, \"stream\"):\n retry_server_info = self.stream.headers.get(\n \"Retry-After\", \"\"\n )\n if retry_server_info:\n logger.debug(\n f\"[{self.provider} response] Retry-After: {retry_server_info}\"\n )\n logger.info(retry_info)\n nb_info.display_html(retry_info)\n product.next_try = datetime_now\n elif datetime_now < product.next_try and datetime_now < stop_time:\n wait_seconds = (product.next_try - datetime_now).seconds + (\n product.next_try - datetime_now\n ).microseconds / 1e6\n retry_count += 1\n retry_info = (\n f\"[Retry #{retry_count}] Waiting {wait_seconds}s until next download try\"\n f\" for ordered product (retry every {wait}' for {timeout}')\"\n )\n logger.debug(not_available_info)\n # Retry-After info from Response header\n if hasattr(self, \"stream\"):\n retry_server_info = self.stream.headers.get(\n \"Retry-After\", \"\"\n )\n if retry_server_info:\n logger.debug(\n f\"[{self.provider} response] Retry-After: {retry_server_info}\"\n )\n logger.info(retry_info)\n nb_info.display_html(retry_info)\n sleep(wait_seconds)\n elif datetime_now >= stop_time and timeout > 0:\n if \"storageStatus\" not in product.properties:\n product.properties[\"storageStatus\"] = \"N/A status\"\n logger.info(not_available_info)\n raise NotAvailableError(\n f\"{product.properties['title']} is not available ({product.properties['storageStatus']})\"\n f\" and could not be downloaded, timeout reached\"\n )\n elif datetime_now >= stop_time:\n raise NotAvailableError(not_available_info)\n\n return download(*args, **kwargs)\n\n return download_and_retry\n\n return decorator", "def query_retry(self, f, *args, **kwargs):\n\n num_retries = CONF.watcher_datasources.query_max_retries\n timeout = CONF.watcher_datasources.query_timeout\n for i in range(num_retries):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n LOG.exception(e)\n self.query_retry_reset(e)\n LOG.warning(\"Retry {0} of {1} while retrieving metrics retry \"\n \"in {2} seconds\".format(i+1, num_retries, timeout))\n time.sleep(timeout)", "def i2c_retry(n):\n def decorator(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n for _ in range(n-1):\n try:\n return func(*args, **kwargs)\n except OSError:\n time.sleep(0.05) # <-- allow the I2C bus to chill-out before we try again\n return func(*args, **kwargs)\n\n return func_wrapper\n\n return decorator", "def 
_retryProtect(m):\n\tdef f(self, *args, **kwargs):\n\t\ttry:\n\t\t\treturn m(self, *args, **kwargs)\n\t\texcept:\n\t\t\tself.reset()\n\t\t\treturn m(self, *args, **kwargs)\n\n\treturn functools.update_wrapper(f, m)", "def retry_request(\n self,\n tapi_exception,\n error_message,\n repeat_number,\n response,\n request_kwargs,\n api_params,\n **kwargs\n ):\n return False", "def retry_on_deadlock(func):\n @functools.wraps(func)\n def decorate(*args, **kw):\n # We can't use RetryDecorator from oslo_service directly because\n # it runs a decorated function in a different thread and hence\n # the function doesn't have access to authentication context\n # set as a thread local variable.\n # The solution is to reuse RetryDecorator but explicitly set\n # auth context in the new thread that RetryDecorator spawns.\n # In order to do that we need an additional helper function.\n\n auth_ctx = ctx.ctx() if ctx.has_ctx() else None\n\n return _with_auth_context(auth_ctx, func, *args, **kw)\n\n return decorate", "def retry_on_exception(func, num_tries=40, period_in_seconds=DEFAULT_PERIOD,\n error=None):\n for x in range(num_tries):\n try:\n return func()\n except Exception as e:\n if error and e.error_code == error:\n logging.info(\"Skipping on exception %s\" % error)\n break\n if x == (num_tries - 1):\n raise RuntimeError(\"Failed on %d tries: %s\" % (num_tries, e))\n logging.info(\"Got exception %s on try number %s...\" % (e, x))\n\n time.sleep(period_in_seconds)", "def set_retry_timeout(self, retry_timeout):", "def retry(times: int, except_callback: Optional[Callable[..., Any]] = None):\n\n def wrap(func):\n @wraps(func)\n def retry_it(*args, **kwargs):\n nonlocal times\n if times < 0: # forever\n times = 1 << 32\n\n for i in range(1, times + 1):\n try:\n r = func(*args, **kwargs)\n return r\n except Exception as err:\n if except_callback is not None:\n except_callback(err, i)\n\n if i == times:\n raise err\n\n return retry_it\n\n return wrap", "def retry_on_exception(func, max_attempts=5, ignored_exceptions=(StaleElementReferenceException, InvalidElementStateException)):\r\n attempt = 0\r\n while attempt < max_attempts:\r\n try:\r\n return func()\r\n except ignored_exceptions:\r\n world.wait(1)\r\n attempt += 1\r\n\r\n assert_true(attempt < max_attempts, 'Ran out of attempts to execute {}'.format(func))", "def _retry_request(self, request, timeout=2, attempts=3):\n import googleapiclient\n\n try:\n return request.execute()\n except BrokenPipeError as ex:\n if attempts > 0:\n time.sleep(timeout)\n return self._retry_request(request, timeout * 2, attempts - 1)\n raise ex\n except googleapiclient.errors.HttpError as ex:\n log_verbose_traceback(ex)\n raise ex\n except Exception as ex:\n log_verbose_traceback(ex)\n raise ex", "def retry(self, envelope):\n # type: (RetryPolicy, Envelope) -> None\n raise NotImplementedError()", "def test_retry_run(self):\n pass", "def __call__(self, func, *args):\n\n def wrapped_func(*args, **kwargs):\n\n count = 0\n while True:\n response = func(*args, **kwargs)\n if response.status_code in range(200, 300):\n return response\n elif response.status_code >= 500:\n if count == self.retry_count:\n return response\n else:\n time.sleep(pow(2, count))\n count += 1\n continue\n else:\n return response\n\n return wrapped_func", "def _backoff_handler(details):\n LOGGER.debug('[Backoff]: Trying again in %f seconds after %d tries calling %s',\n details['wait'],\n details['tries'],\n details['target'].__name__)", "def sleep_decorator(function):\n\n def wrapper(*args, **kwargs):\n 
sleep(2)\n return function(*args, **kwargs)\n return wrapper", "def _retry(self, f):\n count = 0\n while True:\n try:\n return f()\n # http://initd.org/psycopg/docs/module.html#psycopg2.DatabaseError\n # handle operational error - memory allocation, unexpected disconnect\n except psycopg2.OperationalError, oe:\n count += 1\n if count < self._max_retries:\n LOGGER.warn(\"Transient Error Received %s \", oe)\n time.sleep(self._retry_period)\n else:\n LOGGER.error(\"Unrecoverable Error %s\", oe)\n raise oe\n # other database errors - integrity, internal, programming error etc\n except psycopg2.DatabaseError, de:\n LOGGER.error(\"Database Error %s\", de)\n raise de\n # interface errors\n except psycopg2.Error, e:\n raise e", "async def _retry_get(url: str, retries: int, **kwargs):\r\n retries -= 1\r\n if retries >= 0:\r\n logger.warning(\r\n f\"Retrying request to {url}. Retries remaining: {retries}\")\r\n return await asyncio.create_task(\r\n self.get(url, retries, **kwargs))\r\n logger.error(\r\n f\"Max retries exceeded: {url}. URL can not be navigated.\")", "def retry_exception(num, delay, func, exception=Exception, *args, **kwargs):\n i = 0\n while i <= num:\n try:\n func(*args, **kwargs)\n time.sleep(delay)\n except exception: # pylint: disable=broad-except\n i += 1\n continue\n return\n raise StopIteration(\"Function did not finished successfully\")", "def _retry(self, result, method, url, params_dict, **kwargs):\n return result", "def retry(self, times):\n return Retry((requests.ConnectionError, requests.Timeout), times)", "def test_exp_backoff():\n stream = ReconnectingTweetStream('user', 'pass', initial_wait=1, max_wait=5,\n error_cb=error_callback)\n # A connection failure should happen automatically because of patch\n assert_raises(ConnectionError, stream.next)\n # By now, callback should have been invoked 3 times (1s, 2s, 4s)\n assert callback_invoked == 3", "def test_retry_other_exception():\n\n exceptions_in = [\n RuntimeError(\"what?\"),\n NotImplementedError(\"how?\"),\n RuntimeError(\"no!\"),\n ]\n actual = []\n expected = [1.0, 1.5, 2.25]\n\n def sleep(wait: float):\n actual.append(wait)\n\n @retry(\n (NotImplementedError, RuntimeError),\n retries=4,\n delay=1.0,\n backoff=1.5,\n sleep=sleep,\n )\n def explode():\n raise exceptions_in.pop()\n\n try:\n explode()\n raise AssertionError(\"IndexError expected\")\n except IndexError:\n assert actual == expected", "def _retry_on_exception(\n exception: Union[Exception, Tuple[Exception]],\n regex: Optional[str] = None,\n max_retries: int = MAX_POLLS,\n retry_interval_s: int = POLL_INTERVAL,\n):\n\n def dec(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n def try_catch_exc():\n try:\n value = func(*args, **kwargs)\n return value\n except Exception as e:\n if not isinstance(e, exception) or (\n regex and not re.search(regex, str(e))\n ):\n raise e\n return e\n\n for _ in range(max_retries):\n ret = try_catch_exc()\n if not isinstance(ret, Exception):\n break\n time.sleep(retry_interval_s)\n if isinstance(ret, Exception):\n raise ret\n return ret\n\n return wrapper\n\n return dec", "def retry_strategy(self, retry_strat):\n self.retry_strategy = retry_strat\n return self", "def retry(action, attempts=5, sleeptime=60, max_sleeptime=5 * 60,\n sleepscale=1.5, jitter=1, retry_exceptions=(Exception,),\n cleanup=None, args=(), kwargs={}, log_args=True):\n assert callable(action)\n assert not cleanup or callable(cleanup)\n\n action_name = getattr(action, '__name__', action)\n if log_args and (args or kwargs):\n log_attempt_args = 
(\"retry: calling %s with args: %s,\"\n \" kwargs: %s, attempt #%d\",\n action_name, args, kwargs)\n else:\n log_attempt_args = (\"retry: calling %s, attempt #%d\",\n action_name)\n\n if max_sleeptime < sleeptime:\n log.debug(\"max_sleeptime %d less than sleeptime %d\",\n max_sleeptime, sleeptime)\n\n n = 1\n for _ in retrier(attempts=attempts, sleeptime=sleeptime,\n max_sleeptime=max_sleeptime, sleepscale=sleepscale,\n jitter=jitter):\n try:\n logfn = log.info if n != 1 else log.debug\n logfn_args = log_attempt_args + (n, )\n logfn(*logfn_args)\n return action(*args, **kwargs)\n except retry_exceptions:\n log.debug(\"retry: Caught exception: \", exc_info=True)\n if cleanup:\n cleanup()\n if n == attempts:\n log.info(\"retry: Giving up on %s\", action_name)\n raise\n continue\n finally:\n n += 1", "def backoff(\n max_tries=constants.BACKOFF_DEFAULT_MAXTRIES,\n delay=constants.BACKOFF_DEFAULT_DELAY,\n factor=constants.BACKOFF_DEFAULT_FACTOR,\n exception_handler=always_retry,\n before_delay_handler=noop,\n after_delay_handler=noop):\n if max_tries <= 0:\n raise ValueError((\n 'Max tries must be greater than 0; got {!r}'\n ).format(max_tries))\n\n if delay <= 0:\n raise ValueError((\n 'Delay must be greater than 0; got {!r}'\n ).format(delay))\n\n if factor <= 1:\n raise ValueError((\n 'Backoff factor must be greater than 1; got {!r}'\n ).format(factor))\n\n def outter(f):\n def inner(*args, **kwargs):\n m_max_tries, m_delay = max_tries, delay # make mutable\n while m_max_tries > 0:\n try:\n retval = f(*args, **kwargs)\n except Exception as ex:\n m_max_tries -= 1 # consume an attempt\n if m_max_tries < 0:\n # run out of tries\n raise\n if exception_handler(ex):\n logger.info(\n (\n 'backoff retry for: %r (max_tries=%r, '\n 'delay=%r, factor=%r)'\n ),\n f,\n max_tries,\n delay,\n factor\n )\n before_delay_handler(ex)\n time.sleep(m_delay) # wait...\n after_delay_handler(ex)\n m_delay *= factor # make future wait longer\n else:\n # exception handler gave up\n raise\n else:\n # done without errors\n return retval\n return inner\n return outter", "def throttle(f):\n def wrapper(self, *args, **kwargs):\n if self.made_requests < self.max_requests:\n time.sleep(self.delay)\n f(self, *args, **kwargs)\n self.made_requests += 1\n else:\n raise Exception, 'maximum request limit reached'\n return wrapper", "def __call__(self, func):\n timeouts = _exponential_timeout_generator(\n self._initial, self._maximum, self._multiplier, self._deadline)\n\n @general_helpers.wraps(func)\n def func_with_timeout(*args, **kwargs):\n \"\"\"Wrapped function that adds timeout.\"\"\"\n kwargs['timeout'] = next(timeouts)\n return func(*args, **kwargs)\n\n return func_with_timeout", "def retry_on_bad_auth(func):\n @wraps(func)\n def retry_version(self, *args, **kwargs):\n while True:\n try:\n return func(self, *args, **kwargs)\n except trolly.ResourceUnavailable:\n sys.stderr.write('bad request (refresh board id)\\n')\n self._board_id = None\n self.save_key('board_id', None)\n except trolly.Unauthorised:\n sys.stderr.write('bad permissions (refresh token)\\n')\n self._client = None\n self._token = None\n self.save_key('token', None)\n return retry_version" ]
[ "0.78645414", "0.7704353", "0.7669671", "0.75984997", "0.7546372", "0.7504667", "0.7501432", "0.7480629", "0.7442303", "0.7435825", "0.7430782", "0.7420958", "0.7402073", "0.73976827", "0.73973614", "0.73926425", "0.7340207", "0.7321245", "0.73147404", "0.7286289", "0.71659213", "0.71657705", "0.7124064", "0.71134335", "0.7108941", "0.7088581", "0.70566964", "0.70436734", "0.701618", "0.7015194", "0.6990441", "0.69771713", "0.6975653", "0.69677156", "0.6967325", "0.6933435", "0.69051903", "0.6899754", "0.6896861", "0.6895148", "0.6872632", "0.68645215", "0.6860671", "0.681825", "0.68105775", "0.6806565", "0.6798738", "0.67678946", "0.6745121", "0.67376757", "0.6701001", "0.66984916", "0.6690865", "0.66696894", "0.6666534", "0.66140044", "0.6590729", "0.6583969", "0.6581805", "0.65645623", "0.65623635", "0.65425193", "0.6516148", "0.64965785", "0.64727265", "0.6470959", "0.64624906", "0.64241534", "0.64142406", "0.6411878", "0.6410126", "0.6404219", "0.6400244", "0.6396212", "0.6383077", "0.6365411", "0.6356721", "0.63458264", "0.6336315", "0.6296111", "0.6290933", "0.62731946", "0.62561923", "0.6250537", "0.62483037", "0.6247354", "0.62375474", "0.6227956", "0.6224689", "0.6198596", "0.618719", "0.61859876", "0.6185524", "0.6183227", "0.6172919", "0.6153959", "0.61023635", "0.60744965", "0.60663515", "0.60658723" ]
0.6971506
33
Repeat calls to func with specified arguments.
def repeatfunc(func, times=None, *args):
    if times is None:
        return starmap(func, repeat(args))
    return starmap(func, repeat(args, times))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repeat_func(func, *args, **kwargs):\n if kwargs:\n return starmap(lambda args, kwargs: func(*args, **kwargs),\n repeat((args, kwargs))\n )\n else:\n return starmap(func, repeat(args))", "def repeatfunc(cls, func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeatfunc(func, n, *args):\n return starmap(func, repeat(args, n))", "def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def make_func_repeater(f, x):\n\n def repeat(i, x=x):\n if i == 0:\n return x\n else:\n return repeat(i-1, f(x))\n return repeat", "def loop(func):\n def wrapper(*a, **b):\n while True:\n func(*a, **b)\n return wrapper", "def timeit_(func, args, reps=1000, times=7):\n time = min(timeit.Timer(functools.partial(func, *args)).repeat(times, reps))\n print(\"{0} took {1}\".format(func.__name__, time))", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def repeat_n_times(n, fn, *args, **kwargs):\n if args:\n my_args = _transpose_list_of_lists(\n [_maybe_repeat(arg, n) for arg in args])\n else:\n my_args = [[] for _ in range(n)]\n my_kwargs = [{} for _ in range(n)]\n for k, v in six.iteritems(kwargs):\n vals = _maybe_repeat(v, n)\n for i in range(n):\n my_kwargs[i][k] = vals[i]\n\n # construct lists of functions\n fns = _maybe_repeat(fn, n)\n outputs = [fns[i](*my_args[i], **my_kwargs[i]) for i in range(n)]\n if isinstance(outputs[0], tuple):\n outputs = list(zip(*outputs))\n outputs = tuple([list(o) for o in outputs])\n return outputs", "def retrying(func, *retry_args, **retry_kwargs):\n yield retriable(*retry_args, **retry_kwargs)(func)", "def repeat_every(repeats=5, every=2):\n\n def repeat_wrapper(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n for _ in range(repeats):\n value = func(*args, **kwargs)\n if value:\n return value\n time.sleep(every)\n\n return func_wrapper\n\n return repeat_wrapper", "def retry(func, repeat=3, delay=tickTime * 2):\n\twhile repeat:\n\t\tresult = func()\n\n\t\tif result is None and delay and repeat != 1:\n\t\t\tsleep(delay)\n\n\t\telse:\n\t\t\treturn result\n\n\t\trepeat -= 1", "def loop(func, n):\n for i in range(n):\n func()", "def timeit(fun, *args, repeat=1, return_out=False, **kwargs):\n import time\n\n assert repeat >= 1\n out = None\n t_st = time.time()\n for i in range(repeat):\n out = fun(*args, **kwargs)\n t_en = time.time()\n t_el = t_en - t_st\n if return_out:\n return t_el, out\n else:\n return t_el", "def func_with_timeout(*args, **kwargs):\n kwargs['timeout'] = next(timeouts)\n return func(*args, **kwargs)", "def run_with_args(self):\n while True:\n if self.cancelled:\n return\n self.func(self.args)\n time.sleep(self.sleep_time / 1000.00)", "def wrapper(*args, **kwargs):\n start = time.time()\n\n return func(*args, **kwargs), int(1000 * (time.time() - start))", "def repeated(f, n, x):\n if n == 1:\n return f(x)\n else:\n 
return repeated(f,n-1,f(x))", "def iterate(func: Callable[..., T], x: T, *args, index: Index = None):\n func = to_callable(func)\n index = to_index_seq(index)\n\n if index is None and not args:\n out = _iterate(func, x)\n elif index is None:\n out = _iterate_n(func, (x, *args))\n else:\n if not args:\n out = _iterate_indexed(func, index, x)\n else:\n out = _iterate_indexed_n(func, index, (x, *args))\n\n return Iter(out)", "def repeat(fn):\n def repeated():\n i = 0\n while i < random_test_iterations:\n fn()\n i += 1\n # nosetest runs functions that start with 'test_'\n repeated.__name__ = fn.__name__\n return repeated", "def timer(func, reps=reps, *args):\r\n startTime = time.clock()\r\n for i in range(reps):\r\n func(*args)\r\n totTime = time.clock() - startTime\r\n return totTime, (totTime / reps)", "def repeat_every(seconds, fn):\n def wrapper(scheduler):\n try:\n fn()\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n except:\n print('Error executing function')\n\n scheduler = sched.scheduler(time.time, time.sleep)\n scheduler.enter(seconds, 1, wrapper, (scheduler,))\n scheduler.run()", "def retryCall(fn, args=None, keywordArgs=None, failureTester=None, sleepManager=None):\n sleepManager = sleepManager or time.SleepManager()\n while True:\n try:\n result = yield fn(*args, **keywordArgs)\n defer.returnValue(result)\n except Exception: # pylint: disable=W0703\n failureTester(failure.Failure())\n yield sleepManager.sleep()", "def run_func_decorator(\n *func_args: list[Any],\n **func_kwargs: dict[str, Any],\n ) -> Any:\n if isinstance(world_size, int):\n dist_launcher(world_size, *func_args, **func_kwargs)\n elif isinstance(world_size, list):\n for procs in world_size:\n dist_launcher(procs, *func_args, **func_kwargs)\n time.sleep(0.5)\n else:\n raise TypeError(\n 'world_size must be an integer or a list of integers.',\n )", "def _call_n(x, f, n, *args, **kwargs):\n return [f(i, x, *args, **kwargs) for i in range(n)]", "def _wrapper(func, args):\n return func(*args)", "def repeat(job, *args, **kwargs):\n\n def _schedule_decorator(decorated_function):\n job.do(decorated_function, *args, **kwargs)\n return decorated_function\n\n return _schedule_decorator", "def repeat(x, repeats, axis=None):\r\n return RepeatOp(axis=axis)(x, repeats)", "def __call__(self, func: Callable):\n\n @wraps(func)\n def timed(*args, **kwargs):\n with self._recreate_cm():\n self.func_name = func.__name__\n self.args = args\n self.kwargs = kwargs\n self.all_args = (*args, *kwargs.values()) if kwargs != {} else args\n return func(*args, **kwargs)\n\n return timed", "def time_it(fn, *args, repetitions=1, **kwargs):\n\n if not isinstance(repetitions, int):\n raise ValueError('Keyword argument `repetitions` must be an integer')\n if repetitions < 0:\n raise ValueError('`repetitions` must be > 0')\n\n begin = time.perf_counter()\n for _ in range(repetitions):\n fn(*args, **kwargs)\n end = time.perf_counter()\n time_elapsed = end - begin\n average_time = time_elapsed / repetitions\n return average_time", "def generate(self, function, args, avg_delay, duration):\n\tfrequency = 1 / avg_delay\n\tend_time = duration + sim.time()\n\tsim.sleep(random.expovariate(frequency))\n\twhile end_time <= sim.time():\n\t\tsim.sleep(random.expovariate(frequency))\n\t\tfunction(*args)", "def _Retry(func, *args, **kwargs):\n retries = _RETRIES\n while True:\n try:\n return func(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n retries -= 1\n if retries > 0:\n log.info('Exception {e} thrown in {func}. 
Retrying.'.format(\n e=e, func=func.__name__))\n time.sleep(1)\n else:\n raise e", "def cycle(f1, f2, f3):\n def how_many(n):\n def what(x):\n if n >= 1:\n x = f1(x)\n if n >= 2:\n x = f2(x)\n if n >= 3:\n x = f3(x)\n if n > 3:\n return how_many(n - 3)(x)\n else:\n return x\n return what\n return how_many", "def repeat(a, repeats, axis=None):\n return afnumpy.asarray(a).repeat(repeats, axis=axis)", "def __call__(self, func):\n timeouts = _exponential_timeout_generator(\n self._initial, self._maximum, self._multiplier, self._deadline)\n\n @general_helpers.wraps(func)\n def func_with_timeout(*args, **kwargs):\n \"\"\"Wrapped function that adds timeout.\"\"\"\n kwargs['timeout'] = next(timeouts)\n return func(*args, **kwargs)\n\n return func_with_timeout", "def fn(*args, **kwargs):\n pass", "def apply(self, func, *args):\n pass", "def call_orig_func(func, *args, **kwargs):\n return func(*args, **kwargs)", "def times(self, fn):\n for i in range(0, self._):\n fn()\n return self", "async def concurrently_execute(\n func: Callable[[T], Any],\n args: Iterable[T],\n limit: int,\n delay_cancellation: bool = False,\n) -> None:\n it = iter(args)\n\n async def _concurrently_execute_inner(value: T) -> None:\n try:\n while True:\n await maybe_awaitable(func(value))\n value = next(it)\n except StopIteration:\n pass\n\n # We use `itertools.islice` to handle the case where the number of args is\n # less than the limit, avoiding needlessly spawning unnecessary background\n # tasks.\n if delay_cancellation:\n await yieldable_gather_results_delaying_cancellation(\n _concurrently_execute_inner,\n (value for value in itertools.islice(it, limit)),\n )\n else:\n await yieldable_gather_results(\n _concurrently_execute_inner,\n (value for value in itertools.islice(it, limit)),\n )", "def add_to_apply_calls(self, func, *args, **kwargs):\n pass", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def do(self, function, args):\n self.continue_event.clear()\n function(*args)\n self.continue_event.wait()", "def repeat(times, intensive_times=None):\n if intensive_times is None:\n return repeat_with_success_at_least(times, times)\n\n casual_test = bool(int(os.environ.get('CUPY_TEST_CASUAL', '0')))\n times_ = times if casual_test else intensive_times\n return repeat_with_success_at_least(times_, times_)", "def remote_call(func):\n @func_utils.getargsfrom(func)\n def rem_func(self, *args, **kwargs):\n return self.call_in_thread_sync(func,args=(self,)+args,kwargs=kwargs,sync=True,same_thread_shortcut=True)\n return rem_func", "def measure_func(func, args, number=1):\n f = partial(func, *args) # pylint: disable=W0142\n while True:\n start = timer()\n r = timeit.repeat(f, number=number, repeat=1)\n if timer() - start > 1: # at least 1 second per measurement\n break\n number *= 2\n return min(r + timeit.repeat(f, number=number, repeat=2)) / number", "def multi_apply(func, *args, **kwargs):\n\n pfunc = partial(func, **kwargs) if kwargs else func\n map_results = map(pfunc, *args)\n return tuple(map(list, zip(*map_results)))", "def retry(func):\n # ... 
retry MAX_RETRIES times\n # ...\n # make sure you include this for testing:\n # except Exception as exc:\n # print(exc)\n # ...\n # and use wraps to preserve docstring\n #\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n tries = MAX_RETRIES\n while tries > 0:\n try:\n return func(*args, **kwargs)\n except Exception as err:\n print(err)\n\n tries -= 1\n\n raise MaxRetriesException\n\n return wrapper", "def does_it_run(func, args):\n \n if args is None:\n func()\n else:\n func(*args)", "def given_func(arg_1, arg_2, arg_3, arg_4):\n return arg_1 + arg_2 + arg_3 + arg_4", "def do_twice(f):\n f()\n f()", "def do_twice(f):\n f()\n f()", "def apply_function(f, args):\n if len(signature(f).parameters) == len(args):\n func = curry(f)\n for arg_value in args:\n func = func(arg_value)\n return func()\n else:\n raise Exception(\"the number of function's parameter is not matched args, len(args): \", len(args))", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def repeat_nd(x, reps):\n return RepeatND(reps)(x)", "def basic_parallel_loop(func, *args, parallel=True):\n if parallel is True:\n results = Parallel(n_jobs=cpu_count())(delayed(func)(*a) for a in args[0])\n else:\n results = []\n for a in args[0]:\n results.append(func(*a))\n\n return results", "def benchmarkFunc(iter, args=()):\n def decorator(func):\n benchmarkFuncs.append((func, args, iter))\n return func\n return decorator", "def apply(self, func):\r\n return func(**self.kwargs)", "def sequential(self, func, args_dict=None):\n for uri, cf in self._cfs.items():\n args = self._process_args_dict(cf, uri, args_dict)\n func(*args)", "def function(args):\n pass", "def parallelizer(func, arg=False):\n if arg:\n func(arg)\n else:\n func()", "def func_wrapper():\n set_interval_sequence(functions[1:] + functions[:1], sec)\n functions[0]()", "def retry(times,function,*args,**kwargs):\n app_conntext=kwargs.get(\"app-context\")\n while times>0:\n times-=1\n try:\n if app_conntext:\n with app_conntext.app_context() as actx:\n function(*args)\n else:\n function(*args,**kwargs)\n print(function.__name__,\"Ran sucessfully with args \",args,\" and \",kwargs)\n return True # no exception\n except Exception as e:\n #can be any exception so try again\n print(function.__name__,\"exception \",e)\n print(function.__name__,\"failed with \",args,\" and \",kwargs)\n return False", "def __call__(self):\n # apply(self.func, self.args)\n self.func(*self.args)", "def repeater(seconds):\n return lambda function: TwistedRepeater(function, seconds)", "def repeat_circle(obj, *loop_args):\n # if loop_args == (): # tuple is empty\n if not loop_args: # tuple is empty\n loop_args = (5, 201, 5)\n obj.begin_fill()\n for n in range(loop_args[0], loop_args[1], loop_args[2]):\n obj.circle(n)\n obj.end_fill", "def time_func(func, *args):\r\n times = list()\r\n for _ in range(3):\r\n t1 = time.time()\r\n func(*args)\r\n times.append(time.time() - t1)\r\n return min(times)", "def double_rapper(func):\n @wraps(func)\n def rapper(*args, **kwargs):\n print('I am going to run {}'.format(func.__name__))\n func(*args, **kwargs)\n print('{} finished'.format(func.__name__))\n return rapper", "def apply(self, 
func, *args, **kwargs):\n pass", "def call_repeatedly(interval, function, args):\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval):\n function(**args)\n\n threading.Thread(target=loop).start()\n\n # return the thread closing handle\n return stopped.set", "def wrapped_fn(*args, **kwargs):\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)", "def repeat(obj, times=None):\n if times is None:\n return Iter(itertools.repeat(obj))\n return Iter(itertools.repeat(obj, times))", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def execute_with_retry(f, args=[], kwargs={}, retry_on=(Exception,),\n max_tries=3, sleep=5):\n attempt = 0\n result = None\n while attempt < max_tries:\n attempt += 1\n try:\n result = f(*args, **kwargs)\n break\n except retry_on, e:\n if attempt >= max_tries:\n raise e\n log(\"Function call failed ('%s': %i/%i).\\n\"\n \"Reason: %s.\\n\"\n \"Wait for %i sec before retry...\"\n % (f.__name__, attempt, max_tries, str(e), sleep))\n time.sleep(sleep)\n return result", "def variadic_args(self, /, *args, **kwargs):\n return self._func(args, **kwargs)", "def do_a_thing_with_retries(function, max_tries: int, *args: Any) -> Any:\n\n run_results = {}\n func_return = None\n has_fails = False\n\n for tries in range(max_tries):\n try:\n if(tries < max_tries):\n if (args):\n func_return = function(*args)\n else:\n func_return = function()\n run_results[tries] = {\"status\": \"success\", \"error\": \"\"}\n break\n except Exception as ex:\n run_results[tries] = {\"status\": \"fail\", \"error\": ex}\n has_fails = True\n next\n \n if has_fails:\n raise FunctionFailedException(\"Failed\", run_results)\n return func_return", "def fun_par_dict(fun: Callable, *args):\n if len(args) > 0:\n return fun(*args[:-1], **args[-1])\n else:\n return fun()", "def map_multi_args(self, func, iterable, chunksize=None):\n assert self._state == RUN\n return self.map_async(one_to_many(func), iterable, chunksize).get()", "def lift(f: Callable[..., Data]) -> LiftedFunc:\n def inner(*args: Result) -> Result:\n out = []\n for args1 in itertools.product(*args):\n val = f(*args1)\n out.append(val)\n return out\n return inner", "def do_four(f):\n do_twice(f)\n do_twice(f)", "def do_four(f):\n do_twice(f)\n do_twice(f)", "def retry_call(\n callabl: Callable,\n args=None,\n kwargs=None,\n exceptions: Tuple[Any, ...] 
= (),\n retries: int = 10,\n wait: float = 0.1,\n) -> Any:\n\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n\n for attempt in range(1, retries + 1):\n try:\n return callabl(*args, **kwargs)\n except exceptions:\n if attempt < retries:\n time.sleep(wait)\n else:\n raise", "def call_with_retries(function, max_retries=10,\n exception_types=(Exception),\n _args=(), _kwargs={}):\n assert max_retries >= 0\n\n retries = 0\n last_exc = Exception('Unknown exception')\n while retries <= max_retries:\n try:\n return function(*_args, **_kwargs)\n except exception_types as exc:\n retries += 1\n wait = 2.0 ** retries * 0.1 + (random.randint(0, 1000) / 1000)\n time.sleep(wait)\n last_exc = exc\n raise last_exc", "def retry_exception(num, delay, func, exception=Exception, *args, **kwargs):\n i = 0\n while i <= num:\n try:\n func(*args, **kwargs)\n time.sleep(delay)\n except exception: # pylint: disable=broad-except\n i += 1\n continue\n return\n raise StopIteration(\"Function did not finished successfully\")", "def func(*args, **kwargs):\n return call(*args, **kwargs) # pylint: disable = E1102", "def retry(func, *args, **kwargs):\n @functools.wraps(func)\n def wrapper(*w_args, **w_kwargs):\n w_kwargs.update(kwargs)\n return retry_function_on_deadlock(func, *w_args, **w_kwargs)\n\n return wrapper", "def retry(maxRetries, *exceptions):\n def _doDecoration(fn):\n def _doRetry(*args, **kwargs):\n retries = 0\n while retries <= maxRetries:\n try:\n return fn(*args, **kwargs)\n except tuple(exceptions):\n retries +=1\n if retries > maxRetries:\n raise\n \n return _doRetry\n return _doDecoration", "def run(self, func, *args, **kwargs):\n try:\n ret = func(*args, **kwargs)\n\n if not self._should_handle_return(ret, *args, **kwargs):\n return ret\n except Exception as e:\n if not self._should_handle_error(e, *args, **kwargs):\n raise\n\n if self._on_delay is None:\n raise MaxRetryError('Maximum number of retries exceeded for {0}'.format(self._get_func_name(func)))\n\n retries = 0\n for delay in self._get_delay_sequence(*args, **kwargs):\n retries += 1\n\n if self._should_handle_retry(False):\n self._call_with_sig(self._on_retry, self._sig_retry, (delay, retries), *args, **kwargs)\n\n sleep(delay / 1000)\n\n if self._should_handle_retry(True):\n self._call_with_sig(self._on_retry, self._sig_retry, (delay, retries), *args, **kwargs)\n\n try:\n ret = func(*args, **kwargs)\n\n if not self._should_handle_return(ret, *args, **kwargs):\n return ret\n except Exception as e:\n if not self._should_handle_error(e, *args, **kwargs):\n raise\n\n raise MaxRetryError('Maximum number of retries exceeded for {0}'.format(self._get_func_name(func)))", "def n_ary(func):\n def wrapper(x, *args):\n return x if not args else func(x, wrapper(*args))\n return wrapper", "def retry(\n self, n: int, /, *args, error: Catchable = Exception, sleep=None, **kwargs\n ) -> \"fn\":\n\n func = self._mod.retry(n, self, error=error, sleep=sleep)\n return func(*args, **kwargs)", "def compute_over_actions(f, *args):\n\n '''\n # show the middle results\n for a in zip(*args):\n print(\"a:\", a)\n r = f(*a)\n print(\"r:\", r)\n '''\n\n return sum(f(*a) for a in zip(*args))", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def __try_to_run(self, func, args, exceptions=(OSError, PhueException), amount=10,\n sleep=2):\n for _ in range(amount):\n try:\n return func(*args)\n except exceptions as e:\n log.debug(f'Try to run failed, sleeping {sleep}s, {e}')\n 
time.sleep(sleep)\n log.info(f'Failed to run {func.__name__} with args {args}')\n return None" ]
[ "0.8054019", "0.79899377", "0.77091944", "0.71153104", "0.6889798", "0.68692523", "0.6769637", "0.6652528", "0.65822107", "0.6471077", "0.63860214", "0.6347482", "0.6298246", "0.6255725", "0.6221301", "0.60740733", "0.60466105", "0.6042478", "0.5945209", "0.5926972", "0.59132665", "0.59065264", "0.58505136", "0.5833594", "0.5816164", "0.57995516", "0.57884866", "0.57862437", "0.5784547", "0.5771842", "0.57481277", "0.5735195", "0.5717344", "0.5715894", "0.5666458", "0.56641114", "0.5663524", "0.5658885", "0.56504065", "0.5619027", "0.56073534", "0.5606772", "0.56042683", "0.55975425", "0.5592626", "0.5578117", "0.55756927", "0.55457896", "0.55416405", "0.55295104", "0.55279404", "0.550121", "0.54885066", "0.54885066", "0.54805595", "0.54763454", "0.54763454", "0.54763454", "0.54763454", "0.54763454", "0.5463949", "0.54629314", "0.54581827", "0.5443517", "0.543944", "0.5439086", "0.5438969", "0.5438123", "0.5434044", "0.5428685", "0.5416642", "0.5405134", "0.5402139", "0.5402024", "0.53768533", "0.5376024", "0.5374419", "0.5370247", "0.5363309", "0.5360528", "0.53475344", "0.53354204", "0.5312177", "0.53098047", "0.53030235", "0.53028345", "0.53028345", "0.52989024", "0.52966267", "0.5296287", "0.52855176", "0.52613294", "0.5260612", "0.5256718", "0.52563655", "0.5231825", "0.52303946", "0.5223369", "0.52226704" ]
0.80109245
2
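Editor's note: a minimal usage sketch for the repeatfunc recipe in the record above. It assumes the repeatfunc definition from that record plus the itertools imports; the example calls and values are illustrative, not part of the dataset.

from itertools import starmap, repeat
import random

# three independent calls of random.random()
draws = list(repeatfunc(random.random, 3))
# pow(2, 5) evaluated four times -> [32, 32, 32, 32]
powers = list(repeatfunc(pow, 4, 2, 5))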
Sends a POST request containing `data` to url. `auth` should be a tuple containing (username, password).
def post(url, data, auth=None, retries=10):
    if not url.startswith('http://'):
        url = 'http://' + url
    request = urllib2.Request(url)
    if auth:
        request.add_header('Authorization', 'Basic %s' % b64encode('%s:%s' % auth))
    params = urllib.urlencode(data)
    response = urllib2.urlopen(request, params)
    return response.read()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_post_request(self, url, data):\n auth = (self.AUTH_ID, self.AUTH_TOKEN)\n headers = {'content-type': 'application/json'}\n return requests.post(url, data=data, auth=auth, headers=headers)", "def _post(self, url, data=None):\n if data is not None:\n data = urllib.urlencode(data)\n return self._request(url, method='POST', payload=data)", "def post(self, data):\n req = self._new_http_request()\n req.add_data(self._prepare_data(data))\n\n return self._urllib2.urlopen(req)", "def post(self, url, data):\n return self.app.post(get_url(url), data=data, follow_redirects=True)", "def request(conn, data):\n headers = {'Authorization': 'Basic %s' % conn.auth_string}\n r = requests.post(conn.endpoint, data=data, headers=headers)\n return parse_response(r.content)", "def post(self, data):\n return requests.post(self.url, headers=self.headers, data=data)", "def post(self, url, data, token=None):\n return self.app.post(url,\n data=json.dumps(data),\n headers=_token_header(token),\n content_type='application/json')", "def httpPost(self, url='', data='', params={}, headers={}):\n\n return self.httpRequest('POST', url, data, params, headers)", "def _http_post(self, path, data):\n # Prepare the request path\n if path[0] == '/':\n path = path[1:]\n path = urljoin(self.servlet_path, path)\n\n # Request the end points\n conn = httplib.HTTPConnection(\"localhost\", self.port)\n conn.request(\"POST\", path, data, {\"Content-Type\": \"application/json\"})\n result = conn.getresponse()\n data = result.read()\n conn.close()\n\n # Convert the response to a string\n return result.status, to_str(data)", "def post(self, url, data):\r\n print(f\"POST {url}\")\r\n print(\"data:\")\r\n self.pp.pprint(data)\r\n response = self.session.post(url, data=data)\r\n print(f\"STATUS {response.status_code}\")\r\n self.print_cookies()\r\n return response", "def post(self, url, data):\n\t\treturn self.session.post(url, data, headers=self.headers)", "def _post(self, url, data=None):\n url = urljoin(self.base_url, url)\n try:\n r = self._make_request(**dict(\n method='POST',\n url=url,\n json=data,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n\n if r.status_code == 204:\n return None\n return r.json()", "def __http_post(self, data, url_path, with_authentication = True):\n\n res = requests.post(self.__http_build_url(url_path), json = data, headers = self.__http_build_headers(with_authentication))\n res.raise_for_status()\n return res.json()", "def post(path: str, data={}):\n token = get_token()\n headers = {\n \"Authorization\": f\"Bearer {token}\"\n }\n return requests.post(get_base_url() + path, headers=headers, json=data)", "def send_post(self, data):\n\n self.post_data = data\n self.post_data['csrfmiddlewaretoken'] = self.csrftoken\n self.response = self.client.post(self.url, self.post_data, cookies=self.cookies)", "def post_collection(self, data: bytes) -> requests.Response:\n r = requests.post(\n self.config.endpoint,\n auth=(self.config.username, self.config.password),\n data=data,\n )\n return r", "def httpPost(self, url, post_parameters=None):\r\n return self.auth.post(url, post_parameters)", "def _submit(self, endpoint, data):\n full_url = self._prepare_url(endpoint)\n req = self._request(full_url, self._username, self._apikey)\n req.post(data)", "def post(self, url, data=None):\r\n response = self.requestHelper.post(url, 
data=data)\r\n return self.process(response)", "def post(url, data=None, json=None, **kwargs):\n\n return request('post', url, data=data, json=json, **kwargs)", "def post(self, url, user=None, data=None):\n if user:\n token = self.login(user)\n response = requests.post(\n url_root + url, headers={\"access-token\": token}, json=data\n )\n else:\n response = requests.post(url_root + url, json=data)\n return response.json(), response.status_code", "def make_post_request(client, endpoint, data):\n return client.post(endpoint, data=data)", "async def post(self, url, params=None, json_data=None):\n if self._authenticated:\n return await self.__open(url, method=POST, headers=self._head, params=params, json_data=json_data)", "def post(url, to_error=_default_to_error, data=None, json=None, **kwargs):\n\n return request('post',\n url, to_error=to_error, data=data, json=json, **kwargs)", "def postTo(self,conn,data):\n #log(\"postTo: \"+str(conn))\n conn.request(self.command,self.path,data,self.headers)\n resp = conn.getresponse()\n log(\"postTo: \"+str(resp.status)+\", \"+str(resp.reason)+\", \"+str(resp.version))\n return resp", "def api_post(self, path, data):\n return self._api_request(path, 'POST', data)", "def post_data(data):\n\n # Load Userpass CSV File (InfluxDB)\n # Format: server,port,db,user,pass\n with open('userpass_influx', 'r') as f:\n line = f.readline()\n line = line.strip().split(',')\n host = line[0]\n port = line[1]\n db = line[2]\n user = line[3]\n pswd = line[4]\n\n # Post\n url = \"http://%s:8086/write?db=%s&precision=s\" % (host, db)\n auth = requests.auth.HTTPBasicAuth(\"%s\" % user, \"%s\" % pswd)\n r = requests.post(\"%s\" % url, auth=auth, data=\"%s\" % data)\n\n # Debug\n # print r.status_code\n # print r.headers\n # print r.content", "def _post(self, path, data=None):\n headers = {'content-type': 'application/json'}\n if data:\n data = json.dumps(data)\n r = requests.post(self._url(path), data=data, headers=headers)\n assert r.status_code == 200\n return r", "def post(self, url, data=None, params=None):\n return self.session.post(url=self.base_url + url, data=data,\n params=params)", "def send_request(url, user, passwd, payload):\n response = requests.post(url,\n data=json.dumps(payload),\n auth=(user, passwd),\n verify=False,\n timeout=30)\n\n if response.status_code != 200:\n print(\"Status code {}\".format(response.status_code))\n return ERR_STATUS_CODE\n\n try:\n print(json.dumps(response.json(), indent = 4, sort_keys=True))\n except ValueError:\n print(\"{}\".format(response.text))\n return ERR_WRONG_JSON\n\n return SUCCESS", "def post(self, path, data={}, multipart=False, **kwargs):\n body = None\n headers = {}\n if multipart:\n body, content_type = encode_multipart_formdata(data)\n headers[\"Content-Type\"] = content_type\n else:\n body = urllib.urlencode(data, doseq=True)\n\n if 'headers' in kwargs:\n kwargs['headers'].update(headers)\n\n return self.fetch(\n path,\n method=\"POST\",\n body=body,\n headers=headers,\n **kwargs\n )", "def send_post(self, uri, data=None, ofile=None, ojson=None):\n return self.__send_request('POST', uri=uri, data=data, ofile=ofile, ojson=ojson)", "def post(resource, data, **kwargs):\n\t#print(_endpoint(resource,'POST'))\n\tresp = requests.post(\n\t\t_endpoint(resource, 'POST'),\n\t\tparams=_jsonify_dict_values(kwargs),\n\t\tdata=json.dumps(data),\n\t\theaders=PAYLOAD_HEADERS,\n\t\tverify=SERVER_CERT\n\t)\n\tresp.raise_for_status()\n\treturn resp.json()", "def post(self, path, data):\n response = self._request(\"POST\", path, 
json.dumps(data))\n return self._handle_response(response)", "def post(self, url, data, headers=None, follow_redirects=False):\n if data is not None:\n if isinstance(data, dict):\n for key, value in data.items():\n if isinstance(value, unicode):\n data[key] = value.encode('utf-8')\n data = urlencode(data, True)\n return self._fetch(url, 'POST', data, headers,\n follow_redirects=follow_redirects)", "def _post_data(self, url, data):\n connection = httplib.HTTPConnection(self.url)\n json_data = json.dumps(data)\n connection.request(\"POST\", url, json_data, HEADERS)\n response = connection.getresponse()\n\n if response.status != 204:\n raise Exception(response.reason)\n\n return response.status", "def post_api_url(uri, data):\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n headers = {'Content-Type': 'application/yang-data+json',\n 'Accept': 'application/yang-data+json'}\n\n url = \"https://{host}:{port}/{uri}\".format(host=iosxe_restconf['address'],\n port=iosxe_restconf['port'], uri=uri)\n resp = requests.post(url,\n auth=(iosxe_restconf['username'], iosxe_restconf['password']),\n verify=False,\n headers=headers,\n data=data\n )\n return resp", "def send_post(url):\n HEADERS['accept'] = 'application/vnd.yang.data+json'\n if not url.startswith('/'):\n url = \"/{}\".format(url)\n url = BASE_URL + url\n resp = requests.post(url, headers=HEADERS)\n return resp", "def _api_post(self, query, data):\n r = requests.post(self._url + query,\n headers={'Content-Type': 'application/json', 'Accept': 'application/json'},\n auth=self._auth, data=json.dumps(data), timeout=self._request_timeout_secs)\n r.raise_for_status()\n return r", "def post_action(self, path, data=None):\n response = self._request(\"POST\", path, urllib.urlencode(data) if data else None)\n return self._handle_response(response)", "def _request_token(self, data):\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n response = requests.post(\n \"{site}{token_url}\".format(\n site=self.auth_site,\n token_url=self._token_url\n ),\n data=data,\n headers=headers\n )\n\n return response", "def _send_request(self, url, text=None, params=None):\n if params is not None:\n for k, v in params.items():\n params[k] = v.encode(\"utf-8\")\n else:\n params = {}\n\n params['email'] = self._username\n\n if self._password:\n params['pass'] = self._password\n\n if self._hash:\n params['hash'] = self._hash\n\n if text is not None:\n params['s'] = self._stripslashes(text)\n\n\n try:\n response = requests.post(url, data=params)\n except Exception as e:\n print(str(e))\n\n result = response.content.decode('utf-8')\n \n\n try:\n json_data = json.loads(result)\n except ValueError as e:\n print(str(e))\n\n if json_data['status'] == \"Success\":\n return json_data\n elif json_data['status'] == \"Failure\":\n if json_data['error'].startswith(\"Error Authenticating.\"):\n print(json_data['error'])\n else:\n print(json_data['error'])\n else:\n print(json_data)", "def post(self, *path, **data):\n\t\treturn self.request('POST', *path, **data)", "def token_request(post_data, auth_config, conformance):\n auth = None\n if auth_config.get('confidential_client'):\n auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],\n auth_config['client_secret'])\n\n uris = fhir.get_oauth_uris(conformance)\n\n response = requests.post(uris['token'],\n data=post_data,\n allow_redirects=False,\n auth=auth,\n timeout=5)\n\n return response", "def do_request(self, url, in_data=None, in_file_dict=None):\n url_string=url\n logger.debug(\n 
\"do_request request string: {string}\".format(string=url_string)\n )\n response=requests.post(\n url_string,\n data=in_data,\n files=in_file_dict,\n cookies=self.cookies #For Authentication!\n )\n return response", "def raw_post(\n self, uri: str, data: Optional[Dict] = None, json: Optional[Dict] = None, **kwargs\n ) -> requests.Response:\n return self.session.post(url=self._url(uri), data=data, json=json, **kwargs)", "async def _send(self, url, data):\n r = await self.session.post(url, json=data, headers=self.get_headers())\n\n if r.status < 200 or r.status >= 300:\n text = await r.text()\n logger.error(\n 'Error posting {} value of {} to {}: {} '.format(\n data['name'], data['value'], url, text\n )\n )\n\n r.release()", "def post(self, data=None, params=None):\n params = self.parameters(additional_parameters=params)\n res = post(self.endpoint_url, data=data, params=params)\n return Response(res)", "def post(self, endpoint, data=None):\n if endpoint.startswith('/'):\n endpoint = endpoint[1:]\n request_url = self.server + endpoint\n response = self.session.post(request_url, headers=self.headers, data=data)\n if response.status_code != 200:\n return False, response.text\n return True, response.json()", "def post_account(url, token, headers, http_conn=None, response_dict=None,\n service_token=None, query_string=None, data=None):\n close_conn = False\n if http_conn:\n parsed, conn = http_conn\n else:\n parsed, conn = http_connection(url)\n close_conn = True\n method = 'POST'\n path = parsed.path\n if query_string:\n path += '?' + query_string\n req_headers = {'X-Auth-Token': token}\n if service_token:\n req_headers['X-Service-Token'] = service_token\n if headers:\n req_headers.update(headers)\n conn.request(method, path, data, req_headers)\n resp = conn.getresponse()\n body = resp.read()\n if close_conn:\n conn.close()\n http_log((url, method,), {'headers': req_headers}, resp, body)\n\n store_response(resp, response_dict)\n\n if resp.status < 200 or resp.status >= 300:\n raise ClientException.from_response(resp, 'Account POST failed', body)\n resp_headers = {}\n for header, value in resp.getheaders():\n resp_headers[header.lower()] = value\n return resp_headers, body", "def post(self, endpoint, params=None, data=None):\n params = params or dict()\n data = data or dict()\n return self.request(verb=requests.post, address=self.project_address + endpoint,\n params=params, data=data)", "def _send_request(self,\r\n suffix,\r\n data=None,\r\n content_type=None,\r\n auth_user=None,\r\n auth_password=None):\r\n if self._base_url.startswith(\"http://\"):\r\n url = \"{}/{}\".format(self._base_url, suffix)\r\n else:\r\n url = \"http://{}/{}\".format(self._base_url, suffix)\r\n\r\n headers = {}\r\n if auth_user is not None:\r\n auth_string = \"{}:{}\".format(auth_user, auth_password)\r\n b64_string = b64encode(auth_string.encode()).decode()\r\n auth_header = 'Basic {}'.format(b64_string)\r\n headers['authorization'] = auth_header\r\n\r\n if content_type is not None:\r\n headers['Content-Type'] = content_type\r\n\r\n try:\r\n if data is not None:\r\n result = requests.post(url, headers=headers, data=data)\r\n else:\r\n result = requests.get(url, headers=headers)\r\n\r\n if result.status_code == 404:\r\n raise HealthException(\"No such transaction\")\r\n elif not result.ok:\r\n raise HealthException(\"Error {}:{}\".format(result.status_code, result.reason))\r\n\r\n except requests.ConnectionError as err:\r\n raise HealthException('Failed to connect to {}:{}'.format(url, str(err)))\r\n\r\n except 
BaseException as err:\r\n raise HealthException(err)\r\n\r\n return result.text", "async def post(self, resource: str, data: Optional[Sequence]):\r\n return await self.request(\"post\", self._api_base + resource, data=data)", "def _post(self, data=None, headers=None):\n return self.api.send_http_request_v2(method=\"POST\", url=\"https://auth.iqoption.com/api/v2/verify/2fa\",data=json.dumps(data), headers=headers)", "def push_post_request(self, data, sub_api=None, path=None, retry=True):\n self._outgoing_queue.append((data, sub_api, path, retry))", "def post(self, url, data, headers=None, follow_redirects=False):\n headers = self._setup_headers(headers)\n response = self.client.post(url, data=data, headers=headers,\n follow_redirects=follow_redirects)\n self._update_cookies(response.headers)\n return response", "def _doPOST(POST_DATA=LOGIN_POST_DATA, extra_headers=META_HEADERS, args=None, url=LOGIN_URL, cookies=None):\n body = ''\n if POST_DATA:\n body = '&'.join([k+'='+v for k,v in POST_DATA.items()]) % args\n \n headers={\n 'Accept-Encoding' : 'deflate',\n 'Content-Length' : len(body),\n }\n if extra_headers:\n headers.update(extra_headers)\n \n request = urllib2.Request(url, body, headers)\n try:\n response = urllib2.urlopen(request)\n if not cookies:\n cookies = CookieJar()\n cookies.extract_cookies(response, request)\n cookie_handler= urllib2.HTTPCookieProcessor(cookies)\n redirect_handler= urllib2.HTTPRedirectHandler()\n opener = urllib2.build_opener(redirect_handler, cookie_handler)\n resp = opener.open(request)\n return cookies, resp.read()\n except urllib2.HTTPError, e:\n print >> sys.stderr, \"Sky servers having some trouble - \", e\n raise e\n except urllib2.URLError, e:\n print >> sys.stderr, \"Sky servers having some trouble - \", e\n raise e", "def post_rest_call(api_url, data_dict, username, password, print_output=False):\n headers = {\n 'Content-Type': 'application/json'\n }\n\n response = requests.post(api_url,\n auth=HTTPBasicAuth(username, password),\n data=json.dumps(data_dict),\n headers=headers,\n verify=False,\n timeout=4)\n\n if print_output:\n if response.status_code == 201:\n print(\"POST OK %s (code %d)\" % (api_url, response.status_code))\n elif response.status_code == 200:\n print(\"POST OK %s (code %d)\" % (api_url, response.status_code))\n elif response.status_code == 204:\n print(\"POST OK %s (code %d)\" % (api_url, response.status_code))\n else:\n print(\"POST Failed for: %s (code %d)\" % (api_url, response.status_code))\n print(\" - Data: %s\" % json.dumps(data_dict))\n print(\" - Text: %s\" % response.text)\n return response", "async def _post_request(self, url, data):\n # Request the specific URL\n async with self.session.post(url, headers=self.headers, data=data) as resp:\n # Finally return the response\n return await resp.json()", "def http_post(call):\n\n verify_ssl = (\n True if \"verify_ssl\" not in call.data.keys() else call.data[\"verify_ssl\"]\n )\n\n headers = basic_headers\n if \"headers\" in call.data.keys():\n headers.update(call.data[\"headers\"])\n\n auth = None\n if \"auth_username\" in call.data.keys() and \"auth_password\" in call.data.keys():\n auth = (\n call.data[\"auth_username\"]\n if \"auth_username\" in call.data.keys()\n else None,\n call.data[\"auth_password\"]\n if \"auth_password\" in call.data.keys()\n else None,\n )\n\n data = None\n\n if \"data\" in call.data.keys():\n data = call.data[\"data\"]\n\n resp = requests.post(\n url=call.data[\"url\"],\n data=data,\n headers=headers,\n verify=verify_ssl,\n timeout=10,\n 
auth=auth,\n )\n\n return resp.status_code == 200", "def post_layer(self, data):\n self.logger.debug(\"Sending to Clair: {}\".format(data))\n return self._request('POST', self._CLAIR_POST_URI, json=data)", "def post(url, data={}, verify=True):\n data = requests.post(url=url, data=json.dumps(data), verify=verify)\n if data.status_code == 201:\n return data", "def post(file: str, data: bytes, addr: tuple):\n assert (\n type(file) == str or type(addr) == tuple or type(data) == bytes\n ), \"Invalid Parameter Types\"\n request = pickle.dumps([\"post\", file])\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(addr)\n s.sendall(request)\n s.recv(1)\n data = bdtp.new_send_data_port(data, (\"\", 0))\n data.send(s)\n s.close()", "def _perform_http_request(self, url, data=None, headers=(), timeout=7.0):\n if self._is_first_request:\n self._is_first_request = False\n self._handle_first_request()\n\n if data is not None:\n if isinstance(data, dict) or isinstance(data, list):\n data = urlencoder.urlencode(data)\n else:\n raise RouterFetchError(\n 'POST data should be a dict, a list or None!'\n )\n\n try:\n req = requestor.Request(url, data)\n for header, value in headers:\n req.add_header(header, value)\n with contextlib.closing(requestor.urlopen(req, timeout=timeout)) as handle:\n self._is_logged_in = True\n return (\n handle.geturl(),\n handle.info(),\n handle.read().decode('utf-8', 'ignore')\n )\n except Exception as e:\n raise RouterFetchError('Failed making request: %s' % repr(e))", "def set_input_data(self, request, auth_data):\n request.auth_data = auth_data", "def _req_post(self, url: str, data, raw_res: bool = False):\n self._get_cookies()\n if not self._cookies:\n return\n r = reqtry.post(url, cookies=self._cookies, data=data, allow_redirects=False, timeout=(3, 3), tries=3, delay=1,\n backoff=1.5, jitter=(1, 1.5))\n if raw_res:\n return r\n assert r.status_code == 200, f\"Post request: Invalid http status code: {r.status_code}\"\n assert '\"errCode\":0' in r.text, f'Post response with error from server. 
Response: {r.text}'\n return r.text", "def _bg_post(url, data):\n threading.Thread(target=requests.post, args=(url, data)).start()", "def post_data(url, dat, ti=10):\n try:\n requests.post(url, data=dat, timeout=ti)\n except requests.exceptions.Timeout as re:\n print(\"Post Error:\", re)\n raise", "def auth_handler(self, url, method, timeout, headers, data):\n username = self.username\n password = self.password\n return basic_auth_handler(url, method, timeout, headers, data, username,\n password)", "def send_request(\n self,\n path: str,\n method: str,\n data: Optional[dict] = None,\n retry_auth=True,\n ) -> Tuple[int, dict]:\n path = urllib.parse.quote(path)\n\n try:\n conn = HTTPConnection(\n host=self.host,\n port=self.port,\n )\n params = {}\n\n if data is not None:\n params[\"body\"] = json.dumps(data)\n\n headers = {}\n\n if self.token is not None:\n headers[\"Auth-Token\"] = self.token\n\n conn.request(\n method=method,\n url=path,\n headers=headers,\n **params,\n )\n res = conn.getresponse()\n response_data = res.read()\n\n if res.status == 401 and retry_auth:\n # try to get a new token:\n\n if self.authenticate():\n return self.send_request(\n path=path,\n method=method,\n data=data,\n retry_auth=False,\n )\n\n return res.status, json.loads(response_data)\n finally:\n conn.close()", "def send_post(data, serial_object, host, url):\n datalen = len(data) + 4\n \n if _open_serv(host, serial_object):\n try:\n serial_object.write(('AT+CIPSEND=0,\\r\\n').encode())\n time.sleep(0.5)\n serial_object.write(('POST {} HTTP/1.1\\r\\n').format(url).encode())\n serial_object.write(('Host: {}\\r\\n').format(host).encode())\n serial_object.write(('Content-Type: application/json\\r\\n').encode()) \n serial_object.write(('Accept: */*\\r\\n').encode())\n serial_object.write(('Content-Length: {}\\r\\n').format(datalen).encode())\n serial_object.write(('\\r\\n').encode())\n serial_object.write(('\\r\\n').encode())\n serial_object.write(('{}\\r\\n').format(data).encode())\n serial_object.write(('\\r\\n').encode())\n serial_object.write(('\\r\\n').encode())\n serial_object.write(chr(26).encode())\n pat = re.compile('\\+IPCLOSE: 0,1')\n if _valid_send(serial_object, pat):\n print ('Data Send')\n else:\n print ('Cannot send data')\n except serial.SerialException:\n print ('Something goes wrong')", "def _send(self, endpoint, method, data):\n\n if method == \"POST\":\n return requests.post(\n f\"{self.API_URL}{endpoint}\",\n headers=self.headers,\n cookies=self.cookies,\n json=data\n )\n else:\n raise ValueError(f\"supported methods are POST but given {method}\")", "def post(self):\n data = self.convert_argument_to_json()\n\n email = data.get('email', None)\n password = data.get('password', None)\n\n if not email or not password:\n raise tornado.web.HTTPError(400, 'Invalid username or password')\n\n with self.session_scope() as session:\n user = session.query(Auth).filter(Auth.email == email).one_or_none()\n\n if not user:\n raise tornado.web.HTTPError(400, 'Incorrect email. 
No user found for {}'.format(email))\n\n if not match_password(password, user.hashed):\n raise tornado.web.HTTPError(400, 'Incorrect password for {}'.format(email))\n\n token = AuthToken.create_token(session, user, AuthToken.AUTHENTICATION_TOKEN)\n\n response = dict(\n token=str(token.uid),\n user=user.to_json()\n )\n\n self.write(response)", "def send_post(url, data, headers, return_output=False):\n req = requests.post(url=url, data=json.dumps(data), headers=headers)\n if return_output:\n return req\n if str(req.status_code).startswith('2'):\n print 'SUCCESS! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n else:\n print 'FAIL! {0} {1} {2}'.format(req.status_code, req.reason, req.content)\n exit(77)", "def post(self, url, data, allow_redirects=False):\r\n try:\r\n op = lambda: self.session.post(url, data=data,\r\n allow_redirects=allow_redirects)\r\n response_json = self._try_with_login(op)\r\n except (RequestException, ConnectionError, HTTPError, ValueError) as err:\r\n # reraise as promised GradingServiceError, but preserve stacktrace.\r\n #This is a dev_facing_error\r\n error_string = \"Problem posting data to the grading controller. URL: {0}, data: {1}\".format(url, data)\r\n log.error(error_string)\r\n raise GradingServiceError(error_string)\r\n\r\n return response_json", "def post(self, uri, data=None, json=None, **kwargs):\n return self.session.post(uri, data=data, json=json, **kwargs)", "def apost(url, **kwargs):\n return requests.post(url, **kwargs)", "def post_binary(self, url: str, data: Any, content_type: str = None, token: str = None) -> dict:\n res = requests.post(url,\n data=data,\n headers=self._get_headers(\n token,\n {\"Content-Type\": (content_type or \"application/octet-stream\")}\n ),\n verify=False,\n proxies=self._get_proxies())\n if not res.ok:\n logger.error(http_debug(res))\n elif logger.isEnabledFor(logging.DEBUG):\n logger.debug(http_debug(res))\n return self._parse_json_response(res, token)", "def token_request(post_data, auth_config, conformance, request_method='POST'):\n auth = None\n allow_redirects = False\n timeout = 5\n\n if auth_config.get('confidential_client'):\n auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],\n auth_config['client_secret'])\n\n uris = fhir.get_oauth_uris(conformance)\n\n if request_method == \"GET\":\n response = requests.get(uris['token'],\n data=post_data,\n allow_redirects=allow_redirects,\n auth=auth,\n timeout=timeout)\n else:\n response = requests.post(uris['token'],\n data=post_data,\n allow_redirects=allow_redirects,\n auth=auth,\n timeout=timeout)\n return response", "def make_post_request(url, headers=None, json_data=None, data=None):\n logger.info(\"Inside: make_post_request\")\n logger.debug(\"make_post_request: parameters - {}, {}, {}, {}\".format(url, headers, json_data, data))\n\n timeout = get_config(REQUEST_TIMEOUT,\"timeout\")\n\n if not headers:\n headers = {}\n\n if json_data:\n resp = requests.post(url, verify=False, headers=headers, json=json_data, timeout=timeout)\n elif data:\n resp = requests.post(url, verify=False, headers=headers, data=data, timeout=timeout)\n\n logger.debug('received status : {}'.format(resp.status_code))\n logger.debug('received text : {}'.format(resp.text))\n logger.info(\"Exit: make_post_request\")\n if RestClient.result_success(resp.status_code):\n return resp\n else:\n err_msg = 'ERROR, received {} code during API call {}'.format(resp.status_code, url)\n logger.error(err_msg)\n raise APIException(err_msg, resp.text)", "def auth_postrequest_json(self, url, 
token, params):\n headers = {\n \"Authorization\": token,\n \"Content-Type\": \"application/json\"\n }\n\n response = self.postrequest_json(url, params, headers)\n return response", "def sr_post(self, route_or_uri, data, params=None, raw_response=False, **kwargs):\n op = lambda r: self.post(r, json=data)\n return self.__req(\n route_or_uri,\n params=params,\n query={},\n op=op,\n raw_response=raw_response,\n **kwargs,\n )", "def send(self, url, data=None):\n if data:\n info = {\n \"id_string\": data.xform.id_string,\n \"uuid\": data.uuid,\n }\n valid_url = url % info\n requests.get(valid_url)", "def _api_request_post(self, endpoint, data, headers=None):\n\n all_headers = {\"Authorization\": self.auth_header}\n\n if headers:\n for header in headers:\n all_headers[header] = headers[header]\n\n response = requests.post(url=f\"{self.api_url}/{endpoint}\", headers=all_headers, data=data)\n code = response.status_code\n if 200 <= code < 300:\n logging.debug(f\"API POST call: {self.api_url}/{endpoint} | {code}\")\n encoding = response.encoding\n raw = response.content\n return json.loads(raw.decode(encoding))\n elif code > 500:\n raise APIAuthException\n else:\n logging.error(f\"ERROR: Bad API POST call: {self.api_url}/{endpoint} | {code}\")", "def post_data(self, question_id, auth_token=None, data=None, headers=None):\n if not data:\n data = self.answer\n user = self.create_user()\n user_id = user[0]\n if not auth_token:\n auth_token = user[1]\n if not headers:\n headers = {\"Authorization\":\"Bearer {}\".format(auth_token)}\n path = \"/api/v2/questions/{}/answers\".format(int(question_id))\n result = self.client.post(path, data=json.dumps(data),\n headers=headers,\n content_type='application/json')\n return result", "def post_request(self, path='', data=None, user=None, **kwargs):\n request = self.rf.post(path, data, **kwargs)\n request.user = user or self.super_user\n return request", "def post(self, url, content, token=None):\n headers = {}\n if token:\n headers = token_header(token)\n return self.app.post(url, data=content, headers=headers)", "def send_post(self, api_url, query=None, body=None, data=None):\n\n if body is not None:\n resp = requests.post(self.base_url + api_url, params=query, data=body)\n else:\n resp = requests.post(self.base_url + api_url, params=query, json=data)\n\n return resp", "def _upload(url, data_file, username, password):\n url_match = '(http(s)?)\\:\\/\\/localhost'\n if re.search(url_match, url):\n print(\"Please configure url settings.\")\n exit(1)\n\n polarion_request = post(url,\n data=data_file,\n auth=auth.HTTPBasicAuth(username,\n password))\n status_code = polarion_request.status_code\n if status_code == codes.ok:\n return status_code\n else:\n print(\"Results upload failed with the follow: {}\".format(\n polarion_request.status_code))\n raise exceptions.RequestException", "def post(self, uri, format='json', data=None, authentication=None, **kwargs):\r\n content_type = self.get_content_type(format)\r\n kwargs['content_type'] = content_type\r\n\r\n if data is not None:\r\n kwargs['data'] = self.serializer.serialize(data, format=content_type)\r\n\r\n if authentication is not None:\r\n kwargs['HTTP_AUTHORIZATION'] = authentication\r\n\r\n return self.client.post(uri, **kwargs)", "def _post(self, url, **kwargs):\n return self._call('POST', url, kwargs)", "def post(self, url, body=None, headers=None, serialize=True):\n return self._request('POST', url, body, headers, serialize)", "def post(self, *args, **kwargs):\n return self._requests_call(util.requests_post, 
*args, **kwargs)", "def send_request(data, url, proxy, headers=None):\n session = requests.Session()\n session.trust_env = False\n session.proxies = { 'http': proxy } if proxy else {}\n session.headers = HEADERS if headers is None else headers\n r = session.post(url=url, data=data)\n print r.status_code", "def make_request(url, params, auth=None, data=None, contentType=None):\n #print 'make_request'\n\n # Import Gevent and monkey patch\n #import gevent\n from gevent import monkey\n monkey.patch_all()\n\n # Import IO Libraries\n import urllib\n import urllib2\n\n if params:\n url = url + '?' + urllib.urlencode(params)\n\n #print url\n #print data\n #print auth\n #print contentType\n\n req = urllib2.Request(url, data=data)\n\n if auth:\n req.add_header('AUTHORIZATION', 'Basic ' + auth)\n\n if contentType:\n req.add_header('Content-type', contentType)\n else:\n if data:\n req.add_header('Content-type', 'text/xml')\n\n\n return urllib2.urlopen(req)", "def post_request(\n self,\n alias,\n uri,\n data=None,\n json=None,\n params=None,\n headers=None,\n files=None,\n allow_redirects=None,\n timeout=None):\n session = self._cache.switch(alias)\n if not files:\n data = utils.format_data_according_to_header(session, data, headers)\n # XXX workaround to restore library default behaviour. Not needed in new keywords\n redir = True if allow_redirects is None else allow_redirects\n\n response = self._common_request(\n \"post\",\n session,\n uri,\n data=data,\n json=json,\n params=params,\n files=files,\n headers=headers,\n allow_redirects=redir,\n timeout=timeout)\n return response", "def post_response(self, path, data=None, user=None, **kwargs):\n if user:\n self.client.force_login(user)\n return self.client.post(path, data, **kwargs)", "def func_auth(self, data):\n check = bytes(data).decode().encode('ascii', 'ignore').decode().lower().rstrip()\n if check == 'auth login':\n auth_id = library.q_id_generate(size=12)\n message = '334 ' + auth_id\n self.func_sender(message)\n self.request.recv(self.std_recv_size)\n auth_id_two = library.q_id_generate(size=12)\n message_two = '334 ' + auth_id_two\n self.func_sender(message_two)\n self.request.recv(self.std_recv_size)\n message_three = self.conf_th_ic.get_item(q_key='std-messages').get(check)\n self.func_sender(message_three)\n return True", "def __post(self, url, payload=None, headers=None):\n if headers is None:\n headers = {\"Content-Type\": \"application/json\"}\n return self.__req(url, \"POST\", body=payload, headers=headers)", "def post(api, url, payload, headers=None, auth=_KERBEROS_AUTH, proxies=None,\n retries=_NUM_OF_RETRIES, timeout=None):\n return call(api, url, 'post', payload=payload,\n headers=headers, auth=auth, proxies=proxies, retries=retries,\n timeout=timeout)" ]
[ "0.6842728", "0.6454791", "0.63290054", "0.6314501", "0.62772477", "0.6190568", "0.61195064", "0.6063945", "0.60278213", "0.5984815", "0.5982844", "0.5974754", "0.59532285", "0.5944688", "0.5935827", "0.59314847", "0.5888572", "0.58549273", "0.5854561", "0.58424336", "0.5775395", "0.57697403", "0.57646114", "0.5763307", "0.57305205", "0.5728581", "0.56576097", "0.5620381", "0.5609589", "0.5602404", "0.5588845", "0.55735254", "0.55569154", "0.5534694", "0.5532454", "0.55284464", "0.5515434", "0.5493628", "0.5474315", "0.5463979", "0.5449207", "0.5435194", "0.5426962", "0.54199976", "0.54156953", "0.54015386", "0.53998166", "0.5381039", "0.53614336", "0.53489584", "0.5323639", "0.5318432", "0.53097194", "0.53087527", "0.5279384", "0.52756226", "0.52731127", "0.5254929", "0.5250957", "0.5237993", "0.52378225", "0.5230981", "0.5221066", "0.5207995", "0.52046514", "0.52045107", "0.5200603", "0.51897335", "0.518416", "0.5182986", "0.5175975", "0.5173564", "0.51702815", "0.51679647", "0.51546854", "0.5140717", "0.51386786", "0.5137018", "0.5131065", "0.5126533", "0.5125722", "0.51256526", "0.511861", "0.5114164", "0.51091754", "0.51080585", "0.50879085", "0.5081384", "0.50775504", "0.50775313", "0.5075307", "0.5073616", "0.5067334", "0.5066965", "0.5047984", "0.5046601", "0.503645", "0.50230336", "0.50115085", "0.5011276" ]
0.7495922
0
Sends a POST request every second to the monitoring server indicating that the process is still running.
def post_heartbeat(host, name, auth=None): data = {'name': name, 'status': 'ok'} try: response = post('{host}/monitoring/heartbeat'.format(host=host), data, auth) except urllib2.URLError: print("Failed to send heartbeat.", file=sys.stderr) else: if response.strip() != 'ok': print('POST got response {response}'.format(response=response), file=sys.stderr) timer = threading.Timer(1.0, post_heartbeat, args=(host, name, auth)) # set the thread as a daemon to exit the program cleanly # when the main thread finishes timer.daemon = True timer.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)", "def keep_alive():\r\n app = Flask(\"\")\r\n @app.route(\"/\")\r\n def home():\r\n return \"Your bot is now alive!\"\r\n\r\n def run():\r\n app.run(host=\"0.0.0.0\", port=8080)\r\n\r\n\r\n server = Thread(target=run)\r\n server.start()", "def background_catch_up():\n while True:\n time.sleep(interval)\n s = 'http://{0}:{1}'.format(args.host, port)\n req = urllib2.Request(s)\n try:\n response = urllib2.urlopen(req)\n response.read()\n except Exception as e:\n pass", "def your_process(seconds):\r\n global STATUS\r\n sleep(seconds)\r\n STATUS = True", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def keepalive(self) -> None:", "def perform_action(self):\n logger.info(\"Now sending a keepalive to the primary\")\n self.connection_handler.send_message(\"I am still alive, client: {num}\".format(num=self.uuid))\n time.sleep(5)", "def console_server(request):\n return run_server(interval='1')", "def ping_moonrat():\n threading.Timer(3600, ping_moonrat).start()\n text = \"Moonrat is still active\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel='G9P7X8Q0H',\n text=text,\n )", "def inform_alive(self):\n\n response = requests.get(\n \"https://brokenprotocol.xyz/Device/Heartbeat\",\n headers={\"auth\": self.token}\n )\n response.raise_for_status()", "def restart():\n info = request.get_json() or {}\n delay_secs = int(info.get('delay', 0))\n\n t = threading.Timer(delay_secs, update_trigger_file)\n t.start()\n\n return jsonify('Success')", "def _keep_alive(self):\n self.send_keep_alive(self.server.startup_time_delta)\n return TaskSignal.AGAIN", "def keepAliveReceived(self):", "def run( self ):\n while True:\n try:\n time.sleep( 5 )\n self._monitorProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )", "def post(host):\n redis.setex('dispatcher',host,60)\n timer = threading.Timer(20.0, post, args=[host])\n timer.daemon = True\n timer.start()", "def startHeartbeat(self):\n self.post.__sendHeartbeat()", "def background_thread():\n count = 0\n while True:\n socketio.sleep(10)\n count += 1\n socketio.emit('my_response',\n {'data': 'Server generated event', 'count': count},\n namespace='/test')", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n 
self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def _keep_alive(self):\n while self._is_running:\n secs_since_last_cmd = time.time() - self._last_cmd_time\n if not self._is_action_active and secs_since_last_cmd > self._keep_alive_secs:\n self._transport.send(activity_cmd(\" \"))\n self._last_cmd_time = time.time()\n self._log.debug(\"Keeping alive\")\n self._keep_alive_event.wait(timeout=self._keep_alive_secs)", "async def monitor(self, session: ClientSession, params: MonitoringData) -> None:\n while True:\n result = await self.check_website(session, params)\n self._producer.send(result)\n await asyncio.sleep(params.interval)", "def background_thread():\n count = 0\n while True:\n time.sleep(10)\n count += 1\n socketio.emit('my response',\n {'data': 'Server generated event', 'count': count},\n namespace='/test')", "def watch(self, url):\n self.__url = url\n self.downtime_info = None\n self.__timer.start()", "def run(self):\n while True:\n print(\"I'm running in the background\")\n time.sleep(self.interval)", "def background_thread():\n count = 0\n while True:\n socketio.sleep(1)\n count += 1\n t = time.strftime('%M:%S', time.localtime())\n cpus = [1,2,3,4] #\n print('sending')\n socketio.emit('server_response',\n {'data': [t, cpus[0],cpus[1],cpus[2],cpus[3]], 'count': count})", "def _keep_alive(self):\n self.send_keep_alive()\n return TaskSignal.AGAIN", "def slot_keepalive_timer(self, _sender, _data):\r\n if self.connected:\r\n #self.debug(\"### sending keepalive\")\r\n self._try_send_raw(\"2::\")", "def thread_function(client):\n threading.Timer(30.0, thread_function).start()\n client.publish(\"serverCommand/keepalive\", \"0\")\n print(\"Message Sent. 
(keepalive)\")", "def continue_server():\n update_server_status({'ready': True})", "def run(self):\n while self.running:\n self.handle_request()", "def monitorOne(self,website):\n checkInterval = website.checkInterval\n time.sleep(checkInterval)\n while self\\\n .queueTermination.empty():\n startSubProcess = time.time()\n # todo define timeout for requests\n try :\n req = requests.get(website.url, timeout=checkInterval)\n reqCode = req.status_code\n reqTime = req.elapsed\n # Generic to handle all kind of http exceptions\n # Possible enhancement\n except Exception:\n continue\n # unix epoch time good for comparison\n currentTime = time.time()\n website.log[currentTime] = {'code': reqCode, 'responseTime': reqTime}\n # 2 mins\n twoMinsDic = self.getTimeframedData(website, 120, currentTime=currentTime)\n self.queueTwoMin.put(twoMinsDic)\n # 10 mins\n tenMinsDic = self.getTimeframedData(website, 600, currentTime=currentTime)\n self.queueTenMin.put(tenMinsDic)\n # 1 hour\n hourDic = self.getTimeframedData(website, 3600, currentTime=currentTime)\n self.queueHour.put(hourDic)\n\n endSubProcess = time.time()\n # Wait for the next check\n try:\n time.sleep(checkInterval-(endSubProcess-startSubProcess))\n except ValueError:\n pass\n\n # Terminate all processes\n self._terminateAll()\n return", "async def handle_live(self, websocket):\n while True:\n payload = self.generate_payload()\n await websocket.send(json.dumps(payload))\n await asyncio.sleep(self.update_frequency_milliseconds / 1000)", "def monitor(self):\n while not self.terminated:\n try:\n if (time.time() - self.updated_time) < 5:\n messages = self.messages.copy()\n # procs = np.min([ len(messages), 9 ]) + 1\n # pool = ThreadPool(procs)\n # pool.map(self.process, messages)\n # pool.close()\n # pool.join()\n for message in messages:\n self.process(message)\n elif self.ws:\n self.updated_time += 10\n self.ws.close()\n except Exception as e:\n self.on_error(None, \"Monitoring Error: {}\".format(e))\n continue\n finally:\n time.sleep(0.1)", "def admin_server(request):\n return run_server(interval='10000')", "def periodicCall(self):\n self.gui.processIncoming()\n if not self.running:\n import sys\n sys.exit(1)\n self.master.after(UPDATE_DELAY, self.periodicCall)", "def Listen(self):\n while True:\n time.sleep(1)", "def poll_http(interval, port_no):\n import requests\n\n url_string = 'http://0.0.0.0:{}/refresh'.format(port_no)\n\n print('DBG: poll_http({}): got to thread @ {}'.format(\n interval, time.strftime(\"%I:%M:%S\")))\n print('url_string = {}'.format(repr(url_string)))\n\n while True:\n time.sleep(interval)\n print('DBG: thread woke up @ {}'.format(time.strftime(\"%I:%M:%S\")))\n r = requests.get(url_string)\n print('DBG: Requests.text = {}'.format(repr(r.text)))", "def daemon_job(interval):\n time.sleep(3) # Wait for api server to start first\n while True:\n try:\n crawl()\n process_notification()\n except Exception:\n traceback.print_exc()\n time.sleep(interval)", "def service( self ):\n\n self.alive = time.time()", "def watch_process(self):\n psutil.wait_procs([psutil.Process(self._proc.pid)],\n callback=self.start)", "async def monitor():\n global counter\n while True:\n time.sleep(1)\n print(counter, 'reqs/sec')\n counter = 0", "def on_keep_alive(self):\n log.debug(\"Received keep-alive signal\")", "def status_task():\n props = [\n (STAT_TIME, current_time),\n (STAT_CONDITION, weather_condition)\n ]\n\n # Send the status request with the current time and condition.\n send_status_request(props)\n\n # Create and start a timer to repeat 
this task periodically.\n t = Timer(report_interval, status_task)\n t.setDaemon(True)\n t.start()", "def status(self):\n # process running ?\n pid = self.get_pidfile()\n \n running = True\n \n # process is not running\n if pid is None:\n running = False\n else:\n if not self.send_signal(pid,0):\n running = False\n # abnormal state, delete the file\n self.delete_pidfile()\n \n if running:\n message = \"server is running\\n\"\n else:\n message = \"server is not running\\n\"\n sys.stdout.write(message)\n \n return running", "def monitor(self):\r\n while True:\r\n for worker, start_time in self.workers.items():\r\n if (not worker.isAlive() or\r\n self.timeout\r\n and datetime.now() - start_time > self.timeout): \r\n\r\n self.work_count.get_nowait()\r\n self.jobs.task_done()\r\n del self.workers[worker]\r\n\r\n time.sleep(1)", "def serve_forever(self, unused_parameter=0.5):\r\n self.stop = False\r\n while not self.stop:\r\n self.handle_request()", "def run_while_true(server_class=BaseHTTPServer.HTTPServer,\n handler_class=BaseHTTPServer.BaseHTTPRequestHandler):\n server_address = ('localhost', 8080)\n httpd = server_class(server_address, handler_class)\n while keep_running():\n httpd.handle_request()", "def periodically_request_websites_metrics() -> int:\n # Making a get request\n configuration_path = os.path.abspath('configuration/service_configuration.json')\n list_of_websites_to_check = read_service_configuration(\n configuration_file=configuration_path, section='list_of_websites_to_check')\n try:\n\n remote_service_url = os.environ.get('websites_checker_service_url','http://192.168.1.101:8080/api/v1/websites_metrics')\n response = requests.post(url=remote_service_url, json=list_of_websites_to_check)\n if response:\n logger.info(f\"The request has been sent to {remote_service_url} with payload: {list_of_websites_to_check}\")\n\n else:\n logger.error(f\"Error contacting the service {remote_service_url}\")\n except Exception as error:\n logger.error(f\"The Exception {error} occurred\")\n return 1", "def KeepAlive(self) -> bool:", "def is_alive():\n return jsonify({'message': 'Service is alive'}), 200", "def pingEsp(self):\n\t\twhile True:\n\t\t\tprint (\"[{}] Keeping alive the ESP8266 connection\".format(\n\t\t\t\tint(time.time()),\n\t\t\t))\n\t\t\tmqttCli.publish(\"ping\", mqttJsonDump('void'))\n\t\t\ttime.sleep(30)", "def do_monitor(self):\n while not self.expired:\n self.expired = True\n time.sleep(self.interval)\n self.dead_fn()", "def background_thread():\n count = 0\n with open(\"logs.txt\", \"r\") as logfile:\n while True:\n socketio.sleep(1)\n count += 1\n\n line = logfile.readline()\n if line:\n socketio.emit('my_response',\n {'data': line, 'count': count},\n namespace='/test')", "def check_loop(\n url, period=5, timeout=10, body_check_re='',\n producer=None, oneshot=False):\n while True:\n worker = threading.Thread(target=check, kwargs={\n 'url': url,\n 'timeout': timeout,\n 'body_check_re': body_check_re,\n 'producer': producer,\n })\n logger.info('check url=%s' % url)\n worker.start()\n time.sleep(period)\n if oneshot:\n return", "def run(self):\n while True :\n try:\n appinfo = self.db.hgetall(self.appname)\n appinfo_str = json.dumps(appinfo)\n data = {'applicationname':self.appname,'appinfo':appinfo_str}\n response = requests.post(REGISTRAR_URL, data=data)\n time.sleep(2)\n except :\n pass", "def monitor(self, target):\n while self.RUNNING:\n check_time = datetime.now()\n next_check = check_time + timedelta(seconds=target[\"frequency\"])\n\n try:\n self.produce(\n 
get(target[\"url\"], timeout=target[\"frequency\"] - 0.5),\n target.get(\"regex\"),\n check_time,\n )\n except Timeout:\n self.logger.warning(\"Check for %s timed out\", target[\"url\"])\n except RequestException as e:\n self.logger.error(e)\n except re.error as e:\n self.logger.error(e)\n break\n\n # Busy loop until next check_time\n while datetime.now() < next_check:\n sleep(1)", "def keep_alive(self):\n req = BFGlobalFactory.create(\"ns1:KeepAliveReq\")\n rsp = self._soapcall(BFGlobalService.keepAlive, req)\n if rsp.header.errorCode != APIErrorEnum.OK:\n logger.error(\"{keepAlive} failed with error {%s}\",\n rsp.header.errorCode)", "def cron_health(request):\n if checkIfProcessRunning('crond'):\n print('Yes a crond process was running')\n return HttpResponse(status=200)\n else:\n print('crond process is Not running')\n\n return HttpResponse(status=500)", "def stream():\n while True:\n try:\n r = requests.post(\"http://streamer_0:5000/stream\", json={})\n break\n except requests.exceptions.ConnectionError:\n logging.error(\"Could not connect to server streamer_0, retrying\")\n time.sleep(2)\n continue\n logging.info(\"'http://streamer_0:5000/stream', response = {}\".format(r.status_code))\n if r.status_code != 200:\n time.sleep(2)\n stream()", "def schedule_alive_job(self):\n if 'alive-notification' not in settings:\n return\n\n self.scheduler.add_job(\n lambda: self.event_notifier.send_message('Surveillance is alive'),\n **settings['alive-notification'])", "async def keep_alive(self):\n self._keepalive = True\n while True:\n await gen.sleep(self.KEEPALIVE_INTERVAL)\n if not self._keepalive:\n return\n try:\n # lines that start with : are comments\n # and should be ignored by event consumers\n self.write(\":keepalive\\n\\n\")\n await self.flush()\n except StreamClosedError:\n return", "def serve_forever(self, poll_interval=0.5):\n logging.info('RPC server starting')\n self._idle_thread.start()\n SimpleJSONRPCServer.SimpleJSONRPCServer.serve_forever(self, poll_interval)", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "def monitor_and_terminate(self):\n import time\n import datetime\n\n keep_running = True\n\n while keep_running:\n\n print()\n print(datetime.datetime.now().replace(microsecond=0))\n print(self.get_monitor_string())\n\n time.sleep(30)\n\n _, status = self.reporter.get_job_status(self.info)\n if status[\"active\"]+status[\"running\"] == 0:\n keep_running = False\n\n print(\"All tasks done.\")", "def serve(self):\n\t\tself.keep_running=1\n\t\tif self.debug:\n\t\t\tprint \"server started\"\n\t\ttry:\n\t\t\twhile self.keep_running:\n\t\t\t\tself.handle_request()\n\t\tfinally:\n\t\t\tif self.debug:\n\t\t\t\tprint \"server finished\"\n\t\t\tself.keep_running=0\n\t\t\tself.close()", "def __sendHeartbeat(self):\n \n while not rospy.is_shutdown():\n rospy.sleep(5)\n self.setOutput(self.write_start+1,0)", "def updated():\n ws = request.environ.get('wsgi.websocket', None)\n print(\"web socket retrieved\")\n app.number_of_connexion += 1\n if ws:\n while True:\n delay = random.randint(MIN_DELAY, MAX_DELAY)\n gevent.sleep(delay)\n try:\n ws.send(str(app.number_of_connexion))\n except WebSocketError:\n print(\"socket died\")\n app.number_of_connexion -= 1\n return \"disconnected\"\n else:\n raise RuntimeError(\"Environment lacks WSGI WebSocket support\")", "def 
heartbeat(jsonify=None):\n url = URLS.heartbeat()\n payload = {\n \"request\": URLS.get_endpoint(url)\n }\n generate_signature(payload)\n data, err = request_post(url, payload, jsonify)\n return data, err", "def do_POST(self):\n bugsnag = self.server.bugsnag\n body = self.rfile.read(int(self.headers['Content-Length']))\n bugsnag.enqueue(body)\n\n response = (\"OK %s:%s -> %s (%s/%s)\\n\" % (bugsnag.listen, bugsnag.port, bugsnag.endpoint, bugsnag.queue.qsize(), bugsnag.queue.maxsize))\n\n try:\n self.send_response(200)\n self.send_header('Content-Length', len(response))\n self.end_headers()\n self.wfile.write(response)\n except:\n print \"Client disconnected before waiting for response\"\n print_exception(*sys.exc_info())\n print \"continuing...\"", "def _heartbeat_loop(self):\n # set last time so that \"if t_now - t_last >= HEARTBEAT_LOG_INTERVAL\"\n # below evalutes to True on the first run\n t_last = time.time() - HEARTBEAT_LOG_INTERVAL - 1\n while True:\n alive = 0\n # count alive processes \n for p in PROCESSES:\n if p.is_alive():\n alive += 1\n\n # no processes are alive - exit heartbeat loop\n if alive == 0:\n return\n\n t_now = time.time()\n if t_now - t_last >= HEARTBEAT_LOG_INTERVAL:\n # log heartbeat\n obj = { \n 'timestamp': time.time(),\n 'child_procs_total': self._procs_total,\n 'child_procs_alive': alive,\n 'probe_req_queue_len': self._probe_request_queue.qsize(),\n 'probe_resp_queue_len': \\\n self._probe_response_queue.qsize(), \n }\n \n # push to shared mem\n self._sm.set(config.BASE['SHARED_MEM_HEARTBEAT_KEY'],\n json.dumps(obj), HEARTBEAT_TTL)\n LOG.debug('pushed a heartbeat to the shared memory')\n\n t_last = t_now\n\n time.sleep(HEARTBEAT_LOOP_INTERVAL)", "def is_monitor_process_live(pid_file):\n live = False\n\n try:\n check_process_status(pid_file)\n live = True\n except ComponentIsNotRunning:\n pass\n\n return live", "def serve(self):\n super(BlacknetSensor, self).serve(BlacknetSensorThread, BLACKNET_PING_INTERVAL, self.do_ping)", "def REP_watcher():\n global REQ_sent_time, REP_recd_time, pid, patience_seconds\n while True:\n time.sleep(patience_seconds) # how often to check\n try:\n recent_REQ_sent_time = REQ_sent_time.popleft()\n # if we got here; we have a recent_REQ_sent_time\n time.sleep(patience_seconds) # allow time for receipt of the REP\n try:\n recent_REP_recd_time = REP_recd_time.popleft()\n # if we got here; we have a recent_REP_recd_time\n interval = recent_REP_recd_time - recent_REQ_sent_time\n if interval.total_seconds() <= 0.0:\n # recent_REP_recd_time is not later than recent_REQ_sent_time\n print('After image send in REP_watcher test,')\n print('No REP received within', patience_seconds, 'seconds.')\n print('Ending sending program.')\n os.kill(pid, signal.SIGTERM)\n pass\n continue # Got REP after REQ so continue to next REQ\n except IndexError: # there was a REQ, but no timely REP\n print('After image send in REP_watcher test,')\n print('No REP received within', patience_seconds, 'seconds.')\n print('Ending sending program.')\n os.kill(pid, signal.SIGTERM)\n pass\n except IndexError: # there wasn't a time in REQ_sent_time\n # so there is no REP expected,\n # ... 
continue to loop until there is a time in REQ_sent_time\n pass", "def post(self):\n\n headers = ''\n for key, value in self.request.headers.iteritems():\n headers += '%s: %s' % (key, value)\n headers += '\\r\\n'\n InboundRequest.add_record(datetime.utcnow(), self.request.host_url,\n self.request.path, headers, self.request.query_string, self.request.body)\n\n taskqueue.add(url='/check_wipe_task')", "def run(self):\n while True:\n req = self._requests.get()[1]\n req.start()\n logging.info('Running request %s', req)", "def register_background_task_reporter():\n from tornado import ioloop\n\n cb = ioloop.PeriodicCallback(log_background_task_status, 60 * 3 * 1000)\n cb.start()\n return cb", "def Serve(self):\n t = threading.Thread(target=self.serve_forever)\n t.setDaemon(True)\n t.start()\n self._Announce()", "def monitor_ws(self, endpoint='https://www.bitmex.com/api/v1'): #TODO: implement\n started = False\n restart_count = 0\n while True:\n if not started or self.ws.exited or self.ws is None:\n self.ws = BitMEXWebsocket(endpoint=endpoint, symbol=self.symbol,\n api_key=self.api_key, api_secret=self.api_secret)\n time.sleep(1)\n if self.ws.started:\n self.logger.info('Websocket is running.')\n started = True\n self.ws_restarting = False\n else:\n if self.ws.lagging:\n self.logger.error('Ws is lagging ,forcing a restart...')\n self.ws.exit()\n started = False\n self.ws_restarting = True\n restart_count += 1\n self.logger.info(f'Ws starts this session: {restart_count}')\n\n else:\n time.sleep(1)", "def loop_forever(self):\n while self.running:\n time.sleep(0.01)", "def worker_status(request):\n try:\n status = app.control.ping() or []\n except:\n # TODO: What celery exceptions are we catching here?\n status = []\n return HttpResponse(\n json.dumps(status),\n content_type=\"application/json\"\n )", "def background_stuff():\n print \"BACKGROUND\"\n\n\n\n # # global socketio\n\n # # print(wsClient.url, wsClient.products)\n # while (wsClient.MessageCount < 30):\n # print(\"\\nMessageCount =\", \"%i \\n\" % wsClient.MessageCount)\n # # time.sleep(1)\n # # socketio.emit('my response', {'data': [\"TEST\"]}, namespace=\"/test\", broadcast=True)\n # #\n # wsClient.close()\n #\n # while True:\n # time.sleep(1)\n # t = str(time.clock())\n # print t\n\n def minute_passed(oldepoch):\n return time.time() - oldepoch >= .1\n\n global wsClient\n\n # t = time.time()\n # for i in range(3000):\n # # while True:\n # # print time.clock(), t\n # # if time.clock() > ( t + .1 ):\n # # show = True #minute_passed(t)\n # # if show:\n # # print show, time.time(), t, time.time() - t\n # t = time.time()\n # cur_time = str(t)\n # socketio.emit('message', {'data': cur_time, \"msg\": wsClient.message['price'] }, namespace=\"/test\", broadcast=True)\n\n # global thread\n # thread = None", "def on_post(cls, req, resp):\n try:\n message = req.body['message']\n runnable = req.body['runnable']\n\n except KeyError as ke:\n raise falcon.HTTPMissingParam(ke.args[0]) from None\n\n session = req.headers.get('API-SESSION')\n if not session:\n raise falcon.HTTPMissingParam('API-SESSION')\n\n req.context['session'] = ObjectId(session)\n req.context['internal'] = True\n\n cls.logger.debug('Running runnable %s', runnable, extra={'session': session})\n\n runnable_class = cls.api.runnables[runnable]\n runnable_class(req).run_local(message)\n\n resp.body = {\n 'runnable': runnable,\n 'message': message,\n }\n cls.logger.debug('runnable %s status: OK', runnable, extra={'session': session})", "def get_status(self) -> bool:\n try:\n 
self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def run(self):\n while True:\n # Do something\n print('Doing something imporant in the background')\n\n self.loadData()\n time.sleep(self.interval)", "async def update_stats(self):\r\n\r\n\t\twhile True:\r\n\t\t\tlogging.info('Attempting to post server count')\r\n\t\t\ttry:\r\n\t\t\t\tawait self.dblpy.post_server_count()\r\n\t\t\t\tlogging.info(f'Posted server count ({len(self.bot.guilds)})')\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tlogging.exception(f'Failed to post server count\\n{type(e).__name__}: {e}')\r\n\t\t\tawait asyncio.sleep(1800)", "def start_watcher():\n while True:\n request_date = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n pull_request_from_remote(remote_files=\"*%s*\" % request_date)\n new_requests = check_for_new_request(request_date=request_date)\n if not new_requests:\n time.sleep(5)\n continue\n\n # noinspection PyTypeChecker\n for r in new_requests:\n print(\"Processing %s\" % r)\n try:\n ret = process_new_request(r, request_date=request_date,\n add2db=True)\n print(ret)\n except:\n os.system('cp -r %s /home/sedm/growth_marshal/archived/failed/'\n % r)\n os.system('cp -r %s /home/sedm/growth_marshal/archived/%s/' %\n (r, request_date))\n\n print(\"Waiting %ss before checking for new request\" % 5)\n time.sleep(5)", "def heartbeat_process(client_id):\n asyncio.run(Client.heartbeat(client_id))", "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def post(self):\n try:\n msg = json.loads(self.request.body)\n command = msg[\"command\"]\n # start/stop data_worker\n if command == \"start\":\n message = self.start_data_worker()\n self.write({\"success\": True, \"message\": message})\n elif command == \"stop\":\n message = self.stop_data_worker()\n self.write({\"success\": True, \"message\": message})\n else:\n self.write({\"success\": False, \"message\": \"unknown command\"})\n except Exception:\n log.exception(\"Exception\")\n self.write({\"success\": False, \"message\": \"error during control\"})", "def monitor_ws(self):\n started = False\n restart_count = 0\n while True:\n if not started or self.ws is None:\n self.ws = BitMEXWebsocket(endpoint=\"https://www.bitmex.com/api/v1\", symbol=self.strategy.symbol,\n api_key=self.api_key, api_secret=self.api_secret)\n sleep(1)\n #try:\n if self.ws.started:\n self.logger.info('Websocket is running.')\n started = True\n self.ws_restarting = False\n #except Exception as fuck:\n #self.logger.error(f'Error in monitor_ws: {fuck}')\n else:\n if self.ws.lagging:\n self.logger.error('Ws is lagging ,forcing a restart...')\n self.ws.exit()\n started = False\n self.ws_restarting = True\n restart_count += 1\n self.logger.info(f'Ws starts this session: {restart_count}')\n\n else:\n sleep(1)", "async def keep_alive(self, period=1, margin=.3):\n self.KeepAlive.interval = period\n self.KeepAlive.margin = margin", "async def handle_ping(request):\n return web.Response(text=f\"OK {datetime.now().isoformat()}\\n\", headers={'Content-Type': 'text/event-stream'})", "def ping_daemon(self):\n s = self.ping_interval\n while True:\n p = domintell.messages.Ping()\n self.send(p)\n time.sleep(s)", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def running(self) -> bool:\n return self._state == AsyncPostRequest._RUNNING", "def timerAction():\n timer = threading.Timer(30.0, timerAction)\n 
timer.daemon = True\n timer.start()\n save()", "def post(self):\n sleep(pow((self.unit * self.timeout), self.count))", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False" ]
[ "0.6335293", "0.6263304", "0.6176471", "0.61680967", "0.6098956", "0.6098956", "0.6098956", "0.6034498", "0.5934776", "0.5786157", "0.5784672", "0.5767236", "0.57396203", "0.57379746", "0.5700311", "0.5679326", "0.5662139", "0.56477326", "0.5647195", "0.56367123", "0.5632368", "0.56243", "0.5592091", "0.5568196", "0.55598336", "0.5550228", "0.5538802", "0.5535337", "0.5510907", "0.5478055", "0.5476954", "0.54576606", "0.5450369", "0.5446191", "0.5445085", "0.54326385", "0.541886", "0.54011893", "0.539024", "0.5388052", "0.5384775", "0.53689957", "0.53668517", "0.53545487", "0.53452295", "0.531786", "0.5303038", "0.53027874", "0.5291096", "0.5290996", "0.52885085", "0.52801585", "0.52793306", "0.5247848", "0.52417064", "0.52389586", "0.523801", "0.5237575", "0.523165", "0.522742", "0.52157426", "0.5210976", "0.52099115", "0.520755", "0.5205918", "0.5200901", "0.51947063", "0.5186975", "0.5178605", "0.51681745", "0.51674783", "0.5157048", "0.51501524", "0.51451546", "0.51331717", "0.5128908", "0.51261455", "0.51228744", "0.5103215", "0.5090531", "0.5089325", "0.50857455", "0.5082183", "0.507677", "0.50720435", "0.50597346", "0.5036731", "0.50349003", "0.50279707", "0.50272673", "0.50147766", "0.50093746", "0.5002977", "0.50025254", "0.49996516", "0.49982074", "0.49855337", "0.4984125", "0.49797288", "0.49721736" ]
0.57290554
14
Sets up the root logger to send log messages to the monitoring server.
def set_up_root_logger(host, name, auth=None): root_logger = logging.getLogger() root_logger.addHandler(HTTPHandler(name, host, auth)) root_logger.setLevel(logging.DEBUG)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _begin_logging(self):\n logconf.set_up_root_logger(self.opts.logfile)", "def setup_logger():\n root = logging.getLogger()\n root.setLevel(LOGGING_LEVEL)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(LOGGING_LEVEL)\n ch.setFormatter(formatter)\n root.addHandler(ch)", "def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n self.app.logger.setLevel(logging.DEBUG)\n elif self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % self.app.config['LOG'])", "def _setup_logger():\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n log_handle = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(levelname)s] (%(asctime)s) - %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"\n )\n log_handle.setFormatter(formatter)\n root.addHandler(log_handle)\n\n logging.info(\"Initializing snakes\")", "def _initialize_logging(self):\n LOG_CFG = os.environ.get('LOG_CFG', 'LOCAL')\n configure_logging(LOG_CFG)\n self.logger = logging.getLogger(self.__class__.__name__)", "def initialize_logging():\n\n print 'Setting up logging...'\n\n log_level = app.config['LOGGING_LEVEL']\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.info('Logging handler established')", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def _setup_default_logger(self):\n #print(f\"setup default logger is called by {self}\")\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(\n '%(process)d-%(levelname)s-%(asctime)s.%(msecs)02d-%(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S'))\n self.logger.addHandler(stream_handler)\n self.logger.propagate = True # don't propagate to the root logger! 
", "def init() -> None:\n log_format = logging.Formatter(\"%(levelname)s || %(name)s || %(asctime)s || %(message)s\")\n\n log_file = Path(\"logs\", \"rl_snake.log\")\n log_file.parent.mkdir(exist_ok=True)\n\n file_handler = handlers.RotatingFileHandler(\n log_file,\n maxBytes=3000000,\n backupCount=5\n )\n file_handler.setFormatter(log_format)\n\n root_logger = logging.getLogger()\n root_logger.addHandler(file_handler)\n root_logger.setLevel(logging.DEBUG if constants.Misc.debug else logging.INFO)\n\n root_logger.info(\"Root logger initilised\")", "def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)", "def setup_root_logger(loglevel=logging.DEBUG, logdir=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Logs'),\n log_config_file=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Utils', 'cent_logger.json')):\n try:\n\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n if log_config_file is not None and os.path.exists(log_config_file):\n with open(log_config_file, 'rt') as logconf:\n config = json.load(logconf)\n # create absolute path for logfile\n config['handlers']['file_handler']['filename'] = logdir + '/' + config['handlers']['file_handler']['filename']\n config['handlers']['longterm']['filename'] = logdir + '/' + config['handlers']['longterm']['filename']\n config['handlers']['single_run']['filename'] = logdir + '/' + config['handlers']['single_run']['filename']\n root_logger = logging.getLogger(\"framework\")\n logging.config.dictConfig(config)\n logger.info(\"I initialized the framework logger\")\n root_logger.info(\"Configured basic root logger from: {}\".format(log_config_file))\n test_logger = logging.getLogger(\"tests\")\n logging.config.dictConfig(config)\n logger.info(\"I initialized the tests logger\")\n test_logger.info(\"Configured basic tests logger from: {}\".format(log_config_file))\n\n # disable logs from below external modules\n for disabled_module in config['disable_module_logs']:\n root_logger.debug('Disabled logging for module: {}'.format(disabled_module))\n logging.getLogger(disabled_module).disabled = True\n\n except Exception as e:\n print(\"Error configuring logger: {}\".format(e), file=sys.stderr)\n raise e#", "def _init_logger(self):\n self.logger = logging.getLogger('WSClientAPILogger')\n self.logger.setLevel(logging.DEBUG)\n self.logger_handler = logging.FileHandler(self.__class__.__name__ + '.log')\n self.logger_handler.setLevel(logging.DEBUG)\n self.logger_formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d-%m %H:%M:%S')\n self.logger_handler.setFormatter(self.logger_formatter)\n self.logger.addHandler(self.logger_handler)", "def setup_logging(self):\n console_handler = 
logging.StreamHandler()\n request_logging.assign_request_filter(console_handler,\n self.additional_fields)\n logging.basicConfig(level=self.level,\n format=self.format_string,\n handlers=[console_handler])\n for handler in logging.root.handlers:\n handler.setFormatter(RedactionFormatter(handler.formatter))\n logger = logging.getLogger(__name__)\n logger.info('Established logging defaults')\n self._setup_log_levels()", "def initLogger(self):\n loglevel = self.loglevels[self.loglevel]\n log_format = '%(asctime)s name=%(name)s loglevel=%(levelname)s message=%(message)s'\n logging.basicConfig(format=log_format,\n level=loglevel)\n \tmultiprocessing.log_to_stderr(loglevel)", "def init_logger():\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(f'[%(asctime)s] %(name)s level=%(levelname)s %(filename)s:%(lineno)d \"%(message)s\"')\n handler = logging.StreamHandler()\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n root_logger.addHandler(handler)\n\n # Silencing the noisy Kafka logger\n kafka_logger = logging.getLogger('kafka')\n kafka_logger.setLevel(logging.ERROR)", "def _configure_logging(self):\n pass", "def configure_logging(self):\n\n root_logger = logging.getLogger('')\n root_logger.setLevel(logging.DEBUG)\n\n console = logging.StreamHandler()\n console_level = self.LOG_LEVEL_MAP.get(self.options.verbose_level,\n logging.WARNING)\n console.setLevel(console_level)\n formatter = logging.Formatter(config.DEFAULT_MESSAGE_FORMAT)\n console.setFormatter(formatter)\n root_logger.addHandler(console)", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def setup():\n config['global']['log.access_file'] = ''\n config['global']['log.error_file'] = ''\n config['global']['log.screen'] = False\n log_level = getattr(logging, config.log_level)\n logging.root.setLevel(logging.NOTSET)\n file_log.setLevel(log_level)\n logging.root.addHandler(file_log)\n if config.log_screen:\n console_log.setLevel(log_level)\n logging.root.addHandler(console_log)", "def configure_logging(self):\r\n root_logger = logging.getLogger('')\r\n\r\n # Set up logging to a file\r\n root_logger.setLevel(logging.DEBUG)\r\n\r\n # Send higher-level messages to the console via stderr\r\n console = logging.StreamHandler(self.stderr)\r\n console_level = {self.WARNING_LEVEL: logging.WARNING,\r\n self.INFO_LEVEL: logging.INFO,\r\n self.DEBUG_LEVEL: logging.DEBUG,\r\n }.get(self.options.verbose_level, logging.DEBUG)\r\n console.setLevel(console_level)\r\n if logging.DEBUG == console_level:\r\n formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)\r\n else:\r\n formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)\r\n console.setFormatter(formatter)\r\n root_logger.addHandler(console)\r\n return", "def configure(cls):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger_handler = logging.StreamHandler()\n logger.addHandler(logger_handler)\n logger_handler.setFormatter(logging.Formatter('%(message)s'))\n cls.logger = logger", "def setup_logging():\n log.setup('keystone')", "def config_logging():\n\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n 
\"[%(asctime)s][%(levelname)s] %(name)s %(filename)s:%(funcName)s:%(lineno)d | %(message)s\")\n\n # Enable debug HTTP request/response\n if root_logger.getEffectiveLevel() == logging.DEBUG:\n http_client.HTTPConnection.debuglevel = 1\n else:\n http_client.HTTPConnection.debuglevel = 0\n\n zabbix_handler = logging.StreamHandler(sys.stdout)\n zabbix_handler.setFormatter(formatter)\n root_logger.addHandler(zabbix_handler)\n return root_logger", "def _initialize_logging(self):\n if self._custom_logger:\n self._logger.debug(\"Skipping logging init: custom logger detected\")\n return\n\n try:\n log_config = self._ez_client.get_logging_config(\n local=bool(self._config.runner_id)\n )\n except Exception as ex:\n self._logger.warning(\n \"Unable to retrieve logging configuration from Beergarden, the default \"\n \"configuration will be used instead. Caused by: {0}\".format(ex)\n )\n return\n\n try:\n configure_logging(\n log_config,\n namespace=self._system.namespace,\n system_name=self._system.name,\n system_version=self._system.version,\n instance_name=self._config.instance_name,\n )\n except Exception as ex:\n # Reset to default config as logging can be seriously wrong now\n logging.config.dictConfig(default_config(level=self._config.log_level))\n\n self._logger.exception(\n \"Error encountered during logging configuration. This most likely \"\n \"indicates an issue with the Beergarden server plugin logging \"\n \"configuration. The default configuration will be used instead. Caused \"\n \"by: {0}\".format(ex)\n )\n return\n\n # Finally, log uncaught exceptions using the configuration instead of stderr\n self._set_exception_hook(self._logger)", "def configure_logging():\n # console_handler = TTSHandler()\n root = logging.getLogger('node_' + __name__)\n root.setLevel(logging.INFO)\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n console_handler.setFormatter(formatter)\n root.addHandler(console_handler)\n\n root = logging.getLogger()\n root.addHandler(console_handler)\n # the default formatter just returns the message\n root.setLevel(logging.DEBUG)", "def configure_logging(self):\n root_logger = logging.getLogger('')\n\n # Set up logging to a file\n root_logger.setLevel(logging.DEBUG)\n\n # Send higher-level messages to the console via stderr\n console = logging.StreamHandler(self.stderr)\n console_level = {self.WARNING_LEVEL: logging.WARNING,\n self.INFO_LEVEL: logging.INFO,\n self.DEBUG_LEVEL: logging.DEBUG,\n }.get(self.options.verbose_level, logging.DEBUG)\n # The default log level is INFO, in this situation, set the\n # log level of the console to WARNING, to avoid displaying\n # useless messages. 
This equals using \"--quiet\"\n if console_level == logging.INFO:\n console.setLevel(logging.WARNING)\n else:\n console.setLevel(console_level)\n if logging.DEBUG == console_level:\n formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)\n else:\n formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)\n logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n console.setFormatter(formatter)\n root_logger.addHandler(console)\n return", "def set_up_logger(self, logger_name):\n\t\tself.logger = logging.getLogger(logger_name)\n\t\tself.logger.setLevel(logging.INFO)\n\t\tconsole_handler = logging.StreamHandler()\n\t\tfile_handler = logging.FileHandler('main_coordinator.log', 'w+')\n\t\tconsole_handler.setLevel(logging.INFO)\n\t\tfile_handler.setLevel(logging.INFO)\n\t\tconsole_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\n\t\tfile_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\t\tconsole_handler.setFormatter(console_format)\n\t\tfile_handler.setFormatter(file_format)\n\t\tself.logger.addHandler(console_handler)\n\t\tself.logger.addHandler(file_handler)", "def setup_class(cls):\n if os.path.exists(logfilename):\n os.remove(logfilename)\n log = logutils.get_logger(__name__)\n log.root.handlers = []\n logutils.config(mode='standard', console_lvl='stdinfo',\n file_name=logfilename)", "def setUp(self):\n self.logger = logging.getLogger(glutil.root_package_name)\n self.orig_handlers = self.logger.handlers\n self.logger.handlers = []\n self.level = self.logger.level\n self.logger.level = logging.DEBUG\n\n self.rt_logger = logging.getLogger()\n self.orig_root_handlers = self.rt_logger.handlers\n self.rt_logger.handlers = []\n self.root_level = self.rt_logger.level\n self.rt_logger.level = logging.CRITICAL", "def initLogging(self):\n logging.basicConfig(level=self.loglevel, stream=sys.stderr)", "def setup_logging():\n if not app.debug:\n if app.config.get('LOG_CFG'):\n # initialize the Flask logger (removes all handlers)\n _ = app.logger\n dictConfig(app.config.get('LOG_CFG'))\n else:\n # capability with previous config settings\n # Should have LOG_FILE and LOG_LEVEL set\n if app.config.get('LOG_FILE') is not None:\n handler = RotatingFileHandler(app.config.get('LOG_FILE'), maxBytes=10000000, backupCount=100)\n else:\n handler = StreamHandler(stream=sys.stderr)\n\n handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(app.config.get('LOG_LEVEL', DEBUG))\n app.logger.addHandler(handler)", "def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()", "def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())", "def configure_logging():\n logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\n\n # Enable logging to syslog as well:\n # Normally this would not be necessary but logging assumes syslog listens on\n # localhost syslog/udp, which is disabled on 10.5 (rdar://5871746)\n syslog = logging.handlers.SysLogHandler('/var/run/syslog')\n syslog.setFormatter(logging.Formatter('%(name)s: %(message)s'))\n syslog.setLevel(logging.INFO)\n 
logging.getLogger().addHandler(syslog)", "def initialize_logging(log_level=logging.INFO):\n if not app.debug:\n print 'Setting up logging...'\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.info('Logging handler established')", "def initialize_logging(log_level=logging.INFO):\n if not app.debug:\n print('Setting up logging...')\n\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.propagate = False\n app.logger.info('Logging handler established')", "def configure_logger():\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)", "def log_setup(self):\n # Logger initialisation\n logger = logging.getLogger(self.app_name)\n logger.setLevel(logging.DEBUG)\n\n # Creating console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Creating formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n\n # Adding formatter to ch\n ch.setFormatter(formatter)\n\n # Adding ch to logger\n logger.addHandler(ch)\n\n # Setting the Logger Level (INFO)\n logger.setLevel(logging.INFO)\n\n return logger", "def initialize_root_logger(log_level=INFO):\n formatter = Formatter(LOGGING_FORMAT)\n\n console_handler = StreamHandler()\n console_handler.setFormatter(formatter)\n\n root_logger = getLogger(__name__)\n root_logger.setLevel(log_level)\n root_logger.addHandler(console_handler)\n\n return root_logger", "def initialize_logging(log_level=logging.INFO):\n if not app.debug:\n print('Setting up logging...')\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.info('Logging handler established')", 
"def setup_logging():\n formatter = logging.Formatter(LOG_FORMAT)\n level = logging.INFO\n\n file_handler = logging.FileHandler('db.log')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n console_handler.setLevel(level)\n\n logger = logging.getLogger()\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n logger.setLevel(level)", "def setup_logging():\n lvl = os.getenv(\"LOG_LEVEL\")\n path = os.getenv(\"LOG_PATH\")\n\n logger = get_logger()\n logger.setLevel(lvl)\n\n filehandler = logging.FileHandler(path)\n filehandler.setLevel(lvl)\n filehandler.setFormatter(logging.Formatter(\n \"[%(asctime)s] %(levelname)s: %(message)s\",\n datefmt=\"%Y-%d-%m %H:%M:%S\"\n ))\n\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(lvl)\n streamhandler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n logger.addHandler(filehandler)\n logger.addHandler(streamhandler)", "def _logger_setup(self, log_file):\r\n # settings\r\n self.log_formatter = logging.Formatter(\"%(asctime)s %(processName)s %(thread)d %(message)s\")\r\n self.default_logging_level = logging.DEBUG\r\n\r\n # set up listener thread for central logging from all processes\r\n queue_manager = multiprocessing.Manager()\r\n self.log_queue = queue_manager.Queue()\r\n self.log_listener = Listener(self.log_queue, self.log_formatter,\r\n self.default_logging_level, log_file)\r\n # note: for debugging, comment out the next line. Starting the listener\r\n # will cause pipe breakage in case of a bug elsewhere in the code,\r\n # and the console will be flooded with error messages from the\r\n # listener.\r\n self.log_listener.start()", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def setup_logging(log_basedir=\"logs\"):\n BASEDIR = os.path.abspath(os.path.dirname(__file__))\n LOGDIR = os.path.join(BASEDIR,log_basedir)\n \n # Check if the logs directory exists and is writable\n if not os.path.isdir(LOGDIR):\n print('ERROR: Log directory {} does not exist.'.format(LOGDIR))\n sys.exit(1)\n if not os.access(LOGDIR, os.W_OK):\n print('ERROR: No permissions to write to log directory {}.'.format(LOGDIR))\n sys.exit(1)\n\n # Set the log message format\n fmt = '%(levelname)s - %(asctime)s.%(msecs).03d %(process)d [%(filename)s:%(lineno)d] %(message)s'\n datefmt = '%m%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt)\n\n # Log to console\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n root.addHandler(console_handler)\n\n # Log to file, use a rotating file\n file_name = os.path.join(LOGDIR, '{}.log'.format(\"flask_api_otrs\") )\n\n file_handler = logging.handlers.RotatingFileHandler(file_name, backupCount=7)\n 
file_handler.setFormatter(formatter)\n root.addHandler(file_handler)", "def setup_logger():\n mc_logger = logging.getLogger('chess_logger')\n mc_logger.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n console_handler.setFormatter(formatter)\n mc_logger.addHandler(console_handler)", "def _configure_logging(self):\n self.log_level = Scaffold.LOG_LEVEL_MAP.get(self.log_level, ERROR)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # assign the windmill instance logger\n #logging.basicConfig()\n self.log = logging.getLogger(self.name)\n self.log.setLevel(self.log_level)\n\n if self.log_path:\n file_path = None\n if self.log_path.endswith('.log'):\n file_path = self.log_path\n else:\n file_path = os.path.join(self.log_path, self.name + '.log')\n assert file_path\n file_handler = logging.FileHandler(file_path)\n file_handler.setLevel(self.log_level)\n file_handler.setFormatter(formatter)\n self.log.addHandler(file_handler)\n\n # if we are in verbose mode, then we send log output to console\n if self.verbose:\n # add the console logger for verbose mode\n console_handler = logging.StreamHandler()\n console_handler.setLevel(self.log_level)\n console_handler.setFormatter(formatter)\n self.log.addHandler(console_handler)\n\n self.log.info('Logging configured for: %s', self.name)", "def log_setup():\n logger = logging.getLogger('diskover')\n logger_warn = logging.getLogger('diskover_warn')\n eslogger = logging.getLogger('elasticsearch')\n diskover_eslogger = logging.getLogger('diskover_elasticsearch')\n loglevel = config['logLevel'].get()\n if options.debug:\n loglevel = 'DEBUG'\n if loglevel == 'DEBUG':\n loglevel = logging.DEBUG\n elif loglevel == 'INFO':\n loglevel = logging.INFO\n else:\n loglevel = logging.WARN\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n if logtofile:\n # create log file name using top dir names and datestamp\n treedirsstr = ''\n if args:\n n = 1\n dirs = args[0:]\n x = len(dirs)\n for d in dirs:\n if d != '/':\n d = d.rstrip('/')\n treedirsstr += os.path.basename(d)\n if n < x:\n treedirsstr += '_'\n n += 1\n else:\n treedirsstr = os.path.basename(os.getcwd())\n logfiletime = datetime.now().isoformat()\n logname = 'diskover_' + treedirsstr + '_' + logfiletime + '.log'\n logfile = os.path.join(logdir, logname)\n handler_file = logging.FileHandler(logfile)\n handler_file.setFormatter(logging.Formatter(logformat))\n logger.setLevel(loglevel)\n logger.addHandler(handler_file)\n # console logging\n handler_con = logging.StreamHandler()\n handler_con.setFormatter(logging.Formatter(logformat))\n logger.addHandler(handler_con)\n # warnings log\n logname_warn = 'diskover_' + treedirsstr + '_' + logfiletime + '_warnings.log'\n logfile_warn = os.path.join(logdir, logname_warn)\n handler_warnfile = logging.FileHandler(logfile_warn)\n handler_warnfile.setFormatter(logging.Formatter(logformat))\n logger_warn.setLevel(logging.WARN)\n logger_warn.addHandler(handler_warnfile)\n # es logger\n eslogger.setLevel(logging.WARN)\n eslogger.addHandler(handler_file)\n eslogger.addHandler(handler_con)\n # diskover es logger\n diskover_eslogger.setLevel(loglevel)\n diskover_eslogger.addHandler(handler_file)\n diskover_eslogger.addHandler(handler_con)\n else:\n handler_file = None\n handler_warnfile = None\n handler_con = None\n logging.basicConfig(format=logformat, level=loglevel)\n 
eslogger.setLevel(logging.WARN)\n return logger, logger_warn, loglevel, logformat, \\\n handler_file, handler_warnfile, handler_con", "def setup_logger():\n\n global _logger\n global _has_logbook\n\n if _has_logbook:\n _logger = Logger('UoM_WIFI')\n try:\n log_path = join(sys.argv[1], '%s.log' % USERNAME)\n except IndexError:\n log_path = join(split(abspath(__file__))[0], '%s.log' % USERNAME)\n\n # because the log file is owned by root, if this program is ran by a\n # regular user, we need to prevent it from crashing by writing to a file\n # owned by root\n try:\n # create the handler\n log_handler = RotatingFileHandler(log_path)\n\n # push the context object to the application stack\n log_handler.push_application()\n except IOError:\n _has_logbook = False", "def initialize_logger(self):\n\n # initialize logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # logger console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(logging.Formatter(\"\"))\n logger.addHandler(console_handler)", "def initialize_log():\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n )", "def init_logging():\n\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)", "def init():\n global logger\n\n with open(\"/app/log.json\", \"r\") as fd:\n logging.config.dictConfig(json.load(fd))\n\n logger = logging.getLogger()", "def setup_logging():\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n console = logging.StreamHandler(sys.stdout)\n console.setLevel(logging.DEBUG)\n console.setFormatter(formatter)\n root = logging.getLogger()\n root.addHandler(console)\n root.setLevel(logging.DEBUG)", "def setup_logging():\n logging.basicConfig(format='%(levelname)s: %(message)s', level=LOGLEVEL)", "def setup_logging():\n logging.basicConfig(\n filename=os.getenv(\"SERVICE_LOG\", \"server.log\"),\n level=logging.DEBUG,\n format=\"%(levelname)s: %(asctime)s pid:%(process)s module:%(module)s %(message)s\",\n datefmt=\"%d/%m/%y %H:%M:%S\",\n )", "def _setup_logging():\n logging.Formatter.converter = time.gmtime\n logging.basicConfig(\n format='%(asctime)s %(message)s',\n level=logging.DEBUG,\n filename='conduit-proxy.log')\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logging.getLogger().addHandler(console)", "def setup_logger():\n now = datetime.now()\n logging.basicConfig(level=logging.DEBUG)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n logging.info(f\"Script run on: {now}\")", "def init_logs() -> None:\n logging.basicConfig(\n filename=\"logs.txt\",\n filemode=\"w\",\n format=\"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\",\n level=logging.ERROR,\n )\n\n formatter = logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\"\n )\n\n global logger\n logger = logging.getLogger(__name__)\n\n # simlogger = logging.getLogger(\"netsquid\")\n # simlogger.setLevel(logging.DEBUG)\n # fhandler = logging.FileHandler(\"simlogs.txt\", mode=\"w\")\n # fhandler.setFormatter(formatter)\n # simlogger.addHandler(fhandler)\n\n # shandler = logging.StreamHandler(stream=sys.stdout)\n # shandler.setLevel(logging.ERROR)\n # 
shandler.setFormatter(formatter)\n # simlogger.addHandler(shandler)", "async def setup(self):\n\t\tlogging.config.dictConfig(self.log_settings['log'])\n\t\tself.logger = logging.getLogger('Responder3')\n\t\tself.create_dir_strucutre()\n\n\t\tif 'handlers' in self.log_settings:\n\t\t\tasync for handlerclass, handler in self.get_handlers():\n\t\t\t\tawait self.start_extension(handlerclass, self.log_settings[self.log_settings['handlers'][handler]])", "def init_logs():\n\n #Ensure that the directories are made\n make_dirs()\n\n #Create FileHandler logging handler, set it's log level, configure the log storage format,\n # and add the formatter to the root logger\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logging.root.addHandler(fh)\n logging.root.setLevel(logging.INFO)\n\n #Report it to the world!\n logging.info(\"Saving logs to \" + log_file)", "def __init__(self, default_level=logging.WARNING):\n # All loggers are an attr of self for tab completion in iPython\n # (with . replaced with _)\n self._loggerdict = logging.Logger.manager.loggerDict\n for name, logger in self._loggerdict.iteritems():\n attr = name.replace('.', '_')\n setattr(self, attr, logger)\n\n if len(logging.root.handlers) == 0:\n # The default level is INFO\n fmt='%(levelname)-7s | %(asctime)-23s | %(name)-8s | %(message)s'\n logging.basicConfig(format=fmt, level=default_level)\n logging.StreamHandler.emit = self._emit_wrap", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def init_logger():\n lformat = \"%(asctime)s [%(levelname)-5.5s] [%(name)s] [%(threadName)-12.12s] %(message)s\"\n\n logging.basicConfig(\n level=logging.INFO,\n format=lformat,\n )\n\n file_handler = handlers.RotatingFileHandler(\n \"{0}/{1}.log\".format('.', 'meta-meta-hive'),\n maxBytes=(50*1024*1024),\n backupCount=7\n )\n file_handler.setFormatter(logging.Formatter(lformat))\n logging.getLogger().addHandler(file_handler)\n return", "def setupLogger():\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)s %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='prepareToSubmit.log',\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)", "def setup_logging():\n for name, logger in loggers.items():\n logger.setLevel(LOGGING_MAPPING.get(options.logging, logging.DEBUG))\n handler = logging.FileHandler(\n getattr(options, '{}_log_file_path'.format(name))\n )\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)", "def setup_logging_with_config(config: DynaBox):\n global logger\n logger = setup_logging_threatbus(config, logger_name)", "def configure_logger (max_threads):\n\t\t# Hack for log line separator\n\t\twith open(\"pinger.log\", \"a\") as log:\n\t\t\tlog.write(\n\t\t\t\t\"==============================================================================================\\n\")\n\n\t\tlogging.basicConfig(filename=\"pinger.log\", 
level=logging.DEBUG, filemode='a',\n\t\t format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%d.%m.%Y %H:%M:%S')\n\t\tlogging.info(\"Started with max threads: %d\", max_threads)", "def init_logging():\n global logger\n logging.basicConfig(\n format='%(levelname)s - %(message)s',\n )\n logger = logging.getLogger('runner')\n logger.setLevel(os.environ.get('LOGGING_LEVEL', 'INFO'))", "def setup_logging(log_dir: Optional[str] = None) -> None:\n config: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"formatters\": {\"console\": {\"format\": \"%(asctime)s:\\t%(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"WARNING\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n LOG_NAME: {\"handlers\": [\"console\"], \"level\": \"DEBUG\", \"propagate\": False}\n },\n }\n if log_dir is not None:\n config[\"loggers\"][LOG_NAME][\"handlers\"].append(\"file\")\n config[\"formatters\"][\"file\"] = {\n \"format\": \"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\n }\n config[\"handlers\"][\"file\"] = {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"formatter\": \"file\",\n \"filename\": os.path.join(log_dir, LOG_NAME + \".log\"),\n \"maxBytes\": 1000000,\n \"backupCount\": 3,\n }\n logging.config.dictConfig(config)", "def setup_logging():\n log_format = '%(asctime)-15s %(levelname)s: %(message)s'\n logging.basicConfig(format=log_format, level=logging.DEBUG,\n filename='counting_consumer.out')", "def init_logging():\n logger.setLevel(logging.DEBUG)\n # set a common log format\n logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n # setup our rotating file handler and assign our common formatter to it\n rotating_file_handler = RotatingFileHandler('my_log.log', maxBytes=200000, backupCount=10)\n rotating_file_handler.setFormatter(logFormatter)\n logger.addHandler(rotating_file_handler)\n \n if DEBUG:\n # print to stdout if we are debugging\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(logFormatter)\n logger.addHandler(stream_handler)", "def _init():\n global logger\n logger = logging.getLogger(\"Log\")", "def __init__(self, logging_level, root_dir, logs_path):\n logging.basicConfig(level=logging_level,\n filename=os.path.join(root_dir, logs_path))", "def test_root_logger_config(self):\n with debug_env:\n logging.config.dictConfig(django12factor.factorise()[\"LOGGING\"])\n self.assertTrue(has_handler(logging.root, \"stdout\"))", "def _configure_logging(self):\r\n self._logger = logging.getLogger('AWSIoTPythonSDK.core')\r\n self._logger.setLevel(logging.ERROR)\r\n self._streamHandler = logging.StreamHandler()\r\n self._formatter = logging.Formatter(\r\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\n self._streamHandler.setFormatter(self._formatter)\r\n self._logger.addHandler(self._streamHandler)", "def _setup_logging(self, log_level_name: str):\n log_level_name = log_level_name.upper()\n if log_level_name not in ['CRITICAL','ERROR', 'WARNING', 'INFO',\n 'DEBUG']:\n print('Invalid debug level: {}'.format(log_level_name))\n sys.exit(0)\n\n log_level = getattr(logging, log_level_name)\n\n logger = logging.getLogger(ROOT_NAMESPACE)\n logger.setLevel(log_level)\n\n ch = logging.StreamHandler()\n ch.setLevel(log_level)\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n 
ch.setFormatter(formatter)\n\n logger.addHandler(ch)", "def setup_logging(log_file):\n\tglobal logger\n\tif log_file:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=log_file,filemode='w',level=logging.INFO)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO)\n\tlogger = logging.getLogger('default')", "def configure_logging():\n dictConfig(DEFAULT_LOGGING)\n\n default_formatter = logging.Formatter(\n \"%(asctime)s [%(levelname)s] [PID:%(process)d TID:%(thread)d] [%(filename)s:%(lineno)s in `%(funcName)s`] %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n\n # file_handler = logging.handlers.RotatingFileHandler(logfile_path, maxBytes=10485760,backupCount=300, encoding='utf-8')\n # file_handler.setLevel(logging.INFO)\n\n if len(logging.getLogger().handlers) > 0:\n for h in logging.getLogger().handlers:\n if isinstance(h, logging.StreamHandler):\n # Then we found a logger to the terminal\n h.setLevel(logging.DEBUG)\n h.setFormatter(default_formatter)\n\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(default_formatter)\n logging.root.addHandler(console_handler)\n\n\n logging.root.setLevel(logging.WARNING)", "def initializeLoggers(self):\n \n #Interesting logger attributes: Logger.findCaller(), logging.addLevelName(lvl, levelName), logging.getLoggerClass() \n \n #if not(os.path.exists(self.settings.get(\"log\",\"logDir\"))):os.makedirs(self.settings.get(\"log\",\"logDir\")) # (025) Config parser pushed out because of settings access harmonizing.\n if not(os.path.exists(self.settings.logDir)): os.makedirs(self.settings.logDir)\n \n if not hasattr(self,\"hitLogger\"):\n # Since debug is Singleton, there's a risk that the loggers are initialized multiple times.\n self.hitLogger=logging.getLogger('hitLogger')\n self.hitLogger.setLevel(self.settings.config.getDebugLevel(\"log\",\"hitsLogLevel\")) #025\n #self.hitLogger.setLevel(self.settings.hitsLogLevel) # 025 # Returns string instead of logLevel\n #Rotating file handler is probably not needed, but File handler didn't work.\n #logFileName=self.settings.insertDatetimeIntoFilename(self.settings.hitsLogFileName)\n hitLoggerPath=self.settings.logDir+\"/\"+self.settings.hitsLogFileName#logFileName\n #fileHandler=logging.handlers.RotatingFileHandler(hitLoggerPath, maxBytes=52428800, backupCount=5)\n fileHandler=logging.handlers.TimedRotatingFileHandler(hitLoggerPath, when='midnight', interval=1, backupCount=20)\n self.hitLogger.addHandler(fileHandler) \n \n if not hasattr(self,\"cllLogger\"):\n # Since debug is Singleton, there's a risk that the loggers are initialized multiple times.\n self.cllLogger=logging.getLogger('cllLogger')\n self.cllLogger.setLevel(self.settings.config.getDebugLevel(\"log\",\"hitsLogLevel\")) # 025 # hitsLogLevel is not a mistake. It is deliberately the same.\n #self.cllLogger.setLevel(self.settings.hitsLogLevel) # Returns string instead of logLevel# hitsLogLevel is not a mistake. 
It is deliberately the same.\n logFileName=self.settings.insertDatetimeIntoFilename(self.settings.cllsLogFileName)\n cllLoggerPath=self.settings.logDir+\"/\"+logFileName\n fileHandler=logging.handlers.RotatingFileHandler(cllLoggerPath, maxBytes=52428800, backupCount=50)\n self.cllLogger.addHandler(fileHandler) \n\n if not hasattr(self,\"cntLogger\"): # Counter logger\n # Since debug is Singleton, there's a risk that the loggers are initialized multiple times.\n self.cntLogger=logging.getLogger('cntLogger')\n self.cntLogger.setLevel(self.settings.config.getDebugLevel(\"log\",\"hitsLogLevel\")) # hitsLogLevel is not a mistake. It is deliberately the same.\n #logFileName=self.settings.insertDatetimeIntoFilename(self.settings.get(\"log\",\"countersLogFileName\")) # 025\n logFileName=self.settings.insertDatetimeIntoFilename(self.settings.countersLogFileName)\n cntLoggerPath=self.settings.logDir+\"/\"+logFileName\n #fileHandler=logging.handlers.RotatingFileHandler(cntLoggerPath, maxBytes=52428800, backupCount=50) # 026\n fileHandler=logging.handlers.TimedRotatingFileHandler(cntLoggerPath,'midnight',1, backupCount=50)\n self.cntLogger.addHandler(fileHandler)\n\n if not hasattr(self,\"missLogger\"):\n # Since debug is Singleton, there's a risk that the loggers are initialized multiple times.\n # missLogger logs links where pattern could be found but none of them led to resource (except the seed).\n self.missLogger=logging.getLogger('missLogger')\n self.missLogger.setLevel(self.settings.config.getDebugLevel(\"log\",\"hitsLogLevel\"))\n #logFileName=self.settings.insertDatetimeIntoFilename(self.settings.missLogFileName)\n missLoggerPath=self.settings.logDir+\"/\"+self.settings.missLogFileName#logFileName\n #fileHandler=logging.handlers.RotatingFileHandler(missLoggerPath, maxBytes=52428800, backupCount=5)\n fileHandler=logging.handlers.TimedRotatingFileHandler(missLoggerPath, when='midnight', interval=1, backupCount=20)\n self.missLogger.addHandler(fileHandler) \n\n if not hasattr(self,\"nopLogger\"):\n # Since debug is Singleton, there's a risk that the loggers are initialized multiple times.\n # nopLogger = No pattern found (except the seed).\n self.nopLogger=logging.getLogger('nopLogger')\n self.nopLogger.setLevel(self.settings.config.getDebugLevel(\"log\",\"hitsLogLevel\"))\n logFileName=self.settings.insertDatetimeIntoFilename(self.settings.nopLogFileName)\n nopLoggerPath=self.settings.logDir+\"/\"+logFileName\n fileHandler=logging.handlers.RotatingFileHandler(nopLoggerPath, maxBytes=52428800, backupCount=50)\n self.nopLogger.addHandler(fileHandler) \n \n if not hasattr(self,\"mainLogger\"): # Since debug is Singleton, there's a risk that the loggers are initialized multiple times.\n # Format usable for all clasic logs (but not hitLogger.\n #self.logFormatter=logging.Formatter(\"%(asctime)s | %(name)s | %(levelname)s | %(message)s\") # Original formatter from example.\n # Changed in Python version 2.5: funcName was added.\n self.logFormatter=logging.Formatter(\"%(asctime)s | %(module)s | %(lineno)d | %(name)s | %(funcName)s | %(levelname)s | %(message)s\")\n\n #logFileName=self.settings.insertDatetimeIntoFilename(self.settings.get(\"log\",\"mainLogFileName\")) # 025\n #logFileName=self.settings.insertDatetimeIntoFilename(self.settings.mainLogFileName)\n mainLoggerPath=self.settings.logDir+\"/\"+self.settings.mainLogFileName #026 logFileName\n self.mainLogLevel=self.settings.config.getDebugLevel(\"log\",\"mainLogLevel\")\n \n #self.mainLogFileHandler = logging.FileHandler(mainLoggerPath, 'a') # 
0.24 replaced by basicConfig - was workaround.\n #self.mainLogFileHandler = logging.handlers.RotatingFileHandler(mainLoggerPath, maxBytes=52428800, backupCount=20)\n #self.mainLogFileHandler=logging.handlers.TimedRotatingFileHandler(mainLoggerPath,when='H',interval=1,backupCount=20)\n \n self.mainLogInterval=1 # 026 When cloning main logger it expands to seconds twice! (If not bypassed like this.)\n #self.mainLogFileHandler=EnhancedRotatingFileHandler(mainLoggerPath,when='H',interval=6,backupCount=50) # 026 stable\n self.mainLogFileHandler=EnhancedRotatingFileHandler(mainLoggerPath,when='midnight',interval=self.mainLogInterval,backupCount=20,maxBytes=52428800) # 026 Experimental\n #self.mainLogFileHandler=EnhancedRotatingFileHandler(mainLoggerPath,when='midnight',interval=1,backupCount=20,maxBytes=1048576) # 026 Beta\n self.mainLogFileHandler.setFormatter(self.logFormatter) # 0.24 replaced by basicConfig\n #self.mainLogFileHandler.maxBytes=1048576#52428800 # 026 with TimedRotatingFileHandler maxBytes doesn't work.\n #self.mainLogFileHandler.backupCount = 20\n # If the fileHandler property was universaly available, it could be used for creating logger in every module.\n comptreeLogLevel=self.settings.config.getDebugLevel(\"log\",\"comptreeLogLevel\")\n\n if not hasattr(self,\"qsize log\"): # qSize logger\n self.qszLogger=logging.getLogger('qsize log')\n self.qszLogger.setLevel(self.settings.config.getDebugLevel(\"log\",\"hitsLogLevel\")) # hitsLogLevel is not a mistake. It is deliberately the same.\n qszLoggerPath=self.settings.logDir+\"/qsize.log\" # 026 Later it should be possible to set-up name or turn it off.\n fileHandler=logging.handlers.TimedRotatingFileHandler(qszLoggerPath,'midnight',1, backupCount=50)\n fileHandler.setFormatter(self.logFormatter)\n self.qszLogger.addHandler(fileHandler)\n\n self.mainLogger=self.enrichMainLogger(\"main log\")\n self.dprintLogger=self.enrichMainLogger(\"dprint l\")\n #self.headerLogger=self.enrichMainLogger(\"header l\") # 026 Overriden by the property\n #self.comptreeLogger=self.enrichMainLogger(\"compTree\",self.mainLogger,comptreeLogLevel)\n #self.comptreeLogger=Debug.ThreadAwareLoger # 026 Overriden by the property\n\n # This will log into std out:\n #streamHandler=logging.StreamHandler()\n #streamHandler.setLevel(logging.DEBUG)\n #streamHandler.setFormatter(formatter) \n #self.mainLogger.addHandler(streamHandler)", "def setupLogger(self):\n self.logger = logging.getLogger('SIMULATOR' + str(self.iSimulatorID))\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n #add formatter to ch and fh\n\n #fh = logging.FileHandler('log.apistub')\n #fh.setLevel(logging.DEBUG)\n #fh.setFormatter(formatter)\n sh = logging.StreamHandler()\n sh.setLevel(logging.DEBUG)\n sh.setFormatter(formatter)\n\n #self.logger.addHandler(fh)\n self.logger.addHandler(sh)\n self.logger.disabled = BLOGGING_DISABLED", "def setupLogging(loglevel=logging.INFO):\n\n # The following configures two loggers, the root logger and a logger named \"phone_ctlr_log\". 
Messages sent to the\n # root logger will be sent to the system log using the syslog protocol, and messages to the \"phone_ctlr_log\" logger will\n # be written to the Phone_Agent.log file which will be rotated once the log reaches 1Mb.\n\n configdict = {\n 'version': 1, # Configuration schema in use; must be 1 for now\n #'disable_existing_loggers': True, # Disables all existing logging configurations\n\n 'formatters': {\n 'brief': {\n 'format' : '%(levelname)-8s %(asctime)s (%(created)s) %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'standard': {\n 'format' : '%(levelname)-8s %(asctime)s %(name)-15s %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'console': {\n 'format' : '%(levelname)-8s %(asctime)s -- %(message)s',\n 'datefmt': '%Y%m%dT%H%M%S.%Z' },\n 'custom': {\n 'format' : '%(asctime)s - %(message)s',\n 'datefmt': '%Y-%m-%dT%H:%M:%S.%Z' } ### Ex,: 2038-01-01T05:05:02\n },\n\n 'handlers': {'applog': {'class': 'logging.handlers.RotatingFileHandler',\n 'filename': '/opt/tools/phone_agent/Phone_Agent.log',\n #'filename': 'Phone_Agent.log',\n 'backupCount': 3,\n 'formatter': 'custom',\n 'level': 'INFO',\n 'maxBytes': 1024*1024},\n 'conlog': {'class': 'logging.StreamHandler',\n 'formatter': 'console',\n #'stream': 'console',\n 'level': 'DEBUG'},\n 'syslog': {'class': 'logging.handlers.SysLogHandler',\n 'formatter': 'standard',\n 'level': 'ERROR'}},\n\n # Specify all the subordinate loggers\n 'loggers': {\n 'phone_ctlr_log': {\n 'handlers': ['applog']\n },\n 'console_log': {\n 'handlers': ['conlog']\n }\n },\n # Specify properties of the root logger\n 'root': {\n 'handlers': ['syslog']\n },\n }\n\n # Set up configuration\n logging.config.dictConfig(configdict)", "def startlogging(self):\n loglevel = self.config['loglevel']\n logfilelevel = self.config['logfilelevel']\n # -v and -vv options only affect stdout logging\n loglevel = (loglevel, 'debug', 'all')[DEBUG] \n logging.basicConfig(level=self.loglevels[loglevel],\n format=self.config['logformat'],\n datefmt='%H:%M:%S')\n logging.addLevelName(5, 'ALL')\n # now define a logging handler for stdout\n logfile = logging.FileHandler('tangled.log')\n logfile.setLevel(self.loglevels[logfilelevel])\n formatter = logging.Formatter(self.config['logformat'], \n self.config['datefmt'])\n logfile.setFormatter(formatter)\n logging.getLogger('').addHandler(logfile)\n logging.info('New logging session at level {}'.format(loglevel))", "def init_logging():\n app.logger.addHandler(logging.StreamHandler())\n log_level = app.config['LOG_LEVEL']\n app.logger.setLevel(getattr(logging, log_level))", "def setup_logging():\r\n import ConfigParser # change this to configparser for Python 3\r\n # import logging\r\n import logging.config\r\n global logger\r\n\r\n try:\r\n \tlogging.config.fileConfig(\"celog.conf\")\r\n except ConfigParser.NoSectionError: \r\n\t# if there is no configuration file setup a default configuration\r\n logging.basicConfig(filename='code_extract.log',level= _logging_level,\r\n\t\t\tformat='%(asctime)s %(levelname)s - %(message)s',\r\n\t\t\tdatefmt='%Y %b %d, %a %H:%M:%S'\r\n\t\t\t)\r\n \r\n logger = logging.getLogger('%s' % __name__)\r\n\r\n logger.debug('logger ready')", "def init_logging(app, logger_name: str):\n app.logger.propagate = False\n gunicorn_logger = logging.getLogger(logger_name)\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n # Make all log formats consistent\n formatter = logging.Formatter(\"[%(asctime)s] [%(levelname)s] [%(module)s] %(message)s\", \"%Y-%m-%d %H:%M:%S 
%z\")\n for handler in app.logger.handlers:\n handler.setFormatter(formatter)\n app.logger.info(\"Logging handler established\")", "def __init__(self):\n\n self._logger = logging.getLogger(__name__)", "def setup_logging():\n logger = logging.getLogger()\n logger.level = logging.DEBUG\n stream_handler = logging.StreamHandler(sys.stdout)\n logger.addHandler(stream_handler)", "def init_logger(self):\n\n if self.args.log_level:\n log_level = getattr(logging, self.args.log_level)\n if coloredlogs:\n coloredlogs.install(level=log_level, fmt=LOG_FMT)\n else:\n logging.basicConfig(level=log_level)\n ch = logging.StreamHandler()\n formatter = logging.Formatter(LOG_FMT)\n ch.setFormatter(formatter)\n elif coloredlogs:\n coloredlogs.install(level='INFO', fmt=LOG_FMT)\n\n if coloredlogs:\n effective_level = coloredlogs.get_level()\n else:\n effective_level = logger.getEffectiveLevel()\n\n # make sure warning and error display at any effective level\n if effective_level > logging.WARNING:\n self.warning = logger.critical\n else:\n self.warning = logger.warning\n\n if effective_level > logging.ERROR:\n self.error = logger.critical\n else:\n self.error = logger.error\n\n self.info = logger.info\n self.debug = logger.debug\n self.exception = logger.exception\n self.critical = logger.critical", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def setup_log(self):\n self.logger, _ = get_logger(\"datatransform\")", "def setup_logs(arg_log_dir, log_level='debug'):\n assert log_level.lower() in ('debug', 'info', 'warning', 'error', 'critical')\n global logger\n cl_logger = log.LogManager(app_name=APP_NAME,\n log_name=__name__,\n log_dir=arg_log_dir)\n logger = cl_logger.logger\n logger.setLevel(log_level.upper())", "def start_logging() -> logging.RootLogger:\r\n # Defines the format of the logged messages.\r\n log_format = \"%(levelname)s | %(asctime)s | %(message)s\"\r\n # Configures logging, logs all messages >= 20 (INFO).\r\n logging.basicConfig(filename=app.config[\"log_file_name\"],\r\n format=log_format,\r\n level=logging.INFO)\r\n # Handle on the logger.\r\n logger = logging.getLogger()\r\n return logger", "def __init__(self):\n\n self.__logger = logging.getLogger()\n\n formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: ' +\n '%(message)s')\n\n file_handler = RotatingFileHandler('.log', 'a', 1000000, 1)\n\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(formatter)\n self.__logger.addHandler(file_handler)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n self.__logger.addHandler(stream_handler)", "def configure():\n # TODO: Simple configuration of what to log and where to log it to\n level_name = getenv(\"LOGLEVEL\", \"INFO\")\n level = getattr(logging, level_name)\n logging.basicConfig(stream=sys.stdout, filemode=\"w\", level=level)\n\n for handler in logging.root.handlers:\n handler.addFilter(Whitelist(\"mara\", \"tests\"))", "def init_log(log_level=logging.DEBUG):\n 
now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y%m%d')\n file_name = os.path.abspath(os.path.join(os.getcwd(), '..', 'traffic_logs', f'{ts}_traffic.log'))\n folder, _ = os.path.split(file_name)\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n # create formatter and add it to the handlers\n log_format = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'\n\n logging.basicConfig(filemode='a',\n format=log_format,\n datefmt='%H:%M:%S',\n level=logging.ERROR,\n stream=sys.stdout,\n # filename=file_handler\n )\n\n formatter = logging.Formatter(log_format)\n\n # create file handler which logs even debug messages\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n\n std_out = logging.StreamHandler(sys.stdout)\n std_out.setFormatter(formatter)\n std_out.setLevel(log_level)\n\n # This for avoiding streams to log to root's stderr, which prints in red in jupyter\n root_logger = logging.getLogger()\n for handler in root_logger.handlers:\n # continue\n root_logger.removeHandler(handler)\n\n # add the handlers to the logger\n root_logger.addHandler(file_handler)\n\n # By default the install() function installs a file_handler on the root root_logger,\n # this means that log messages from your code and log messages from the\n # libraries that you use will all show up on the terminal.\n coloredlogs.install(level=log_level, fmt=log_format, stream=sys.stdout)", "def setup_root_logger(checkpoint_dir, rank, debug):\n # log file prepare\n log_dir = checkpoint_dir / 'log'\n try:\n log_dir.mkdir(mode=0o755)\n except FileExistsError:\n pass\n log_file = log_dir / 'rank{}.log'.format(rank)\n\n basic_handler = logging.FileHandler(log_file)\n basic_handler.setLevel(logging.DEBUG if debug else logging.INFO)\n handlers = [basic_handler]\n\n stdout_handler = logging.StreamHandler(sys.stderr)\n stdout_handler.setLevel(logging.WARNING)\n handlers.append(stdout_handler)\n\n if rank == 0:\n critical_handler = logging.FileHandler(log_dir / 'critical.log')\n critical_handler.setLevel(logging.WARNING)\n handlers.append(critical_handler)\n\n package_name = __name__.split('.')[0]\n root_logger = logging.getLogger(package_name)\n formatter = logging.Formatter(\n f'%(asctime)s[%(name)s]-%(levelname)s-%(message)s',\n datefmt='%H:%M:%S',\n )\n root_logger.setLevel(logging.DEBUG)\n for handler in handlers:\n handler.setFormatter(formatter)\n root_logger.addHandler(handler)\n\n # Capture python's builtin warnings message\n logging.captureWarnings(True)\n redirect_stderr()\n\n logger.warning('Version {} >>> time {} >>> running on {} >>> slurm job id: {}'.format(\n __version__, time.asctime(), socket.gethostname(), slurm.job_id,\n ))", "def init_logging(self, log_level):\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(log_level)\n formatter = logging.Formatter('%(asctime)s %(name)s [%(levelname)s] '\n '%(message)s')\n ch = logging.StreamHandler()\n ch.setLevel(log_level)\n ch.setFormatter(formatter)\n self.logger.addHandler(ch)\n self.logger.debug('logging initialized')", "def setup_global_logging():\n\n global global_logging_started\n\n if global_logging_started:\n return\n\n orig_logger_class = logging.getLoggerClass()\n logging.setLoggerClass(StreamTeeLogger)\n try:\n stdout_logger = logging.getLogger(__name__ + '.stdout')\n stderr_logger = logging.getLogger(__name__ + '.stderr')\n finally:\n logging.setLoggerClass(orig_logger_class)\n\n stdout_logger.setLevel(logging.INFO)\n 
stderr_logger.setLevel(logging.ERROR)\n stdout_logger.set_stream(sys.stdout)\n stderr_logger.set_stream(sys.stderr)\n sys.stdout = stdout_logger\n sys.stderr = stderr_logger\n\n exception_logger = logging.getLogger(__name__ + '.exc')\n sys.excepthook = LoggingExceptionHook(exception_logger)\n\n logging.captureWarnings(True)\n\n rawinput = 'input'\n builtins._original_raw_input = getattr(builtins, rawinput)\n setattr(builtins, rawinput, global_logging_raw_input)\n\n global_logging_started = True" ]
[ "0.75551504", "0.72284514", "0.7111983", "0.709306", "0.70296115", "0.7023245", "0.6968554", "0.6962373", "0.69305164", "0.69274163", "0.69044024", "0.68876123", "0.68429", "0.68380857", "0.6832595", "0.68176967", "0.67931634", "0.6735474", "0.67295617", "0.672686", "0.67145145", "0.67092866", "0.67053854", "0.67046857", "0.6689958", "0.66776764", "0.6676046", "0.6663557", "0.6657567", "0.66559035", "0.66462237", "0.66451925", "0.66281736", "0.6611449", "0.6599608", "0.6576805", "0.6575893", "0.65649444", "0.6560976", "0.6556723", "0.65564907", "0.65556556", "0.6551763", "0.6551629", "0.6549969", "0.6543723", "0.65390146", "0.6535309", "0.6534919", "0.6529421", "0.6524952", "0.6524016", "0.6522163", "0.65118843", "0.65081453", "0.6504311", "0.6503912", "0.648415", "0.6479009", "0.64718574", "0.6466185", "0.6444685", "0.6443758", "0.6442001", "0.6435505", "0.6434808", "0.641103", "0.6410125", "0.63974726", "0.63902825", "0.6387898", "0.6381884", "0.63772655", "0.6366849", "0.6361435", "0.6359828", "0.63592273", "0.6358904", "0.6358851", "0.6354057", "0.6352788", "0.63497615", "0.63369787", "0.63297975", "0.63282675", "0.6319374", "0.63183916", "0.63147247", "0.62922484", "0.62849474", "0.62832403", "0.6271464", "0.62710094", "0.62695473", "0.6266594", "0.6266189", "0.62641376", "0.6253554", "0.6238094", "0.6235518" ]
0.66258967
33
Returns all users from test_data in a json format that's compatible with jqGrid.
def users_json(self, rows=None, sidx=None, _search=None, searchField=None, searchOper=None, searchString=None, page=None, sord=None, nd=None):
    # 1 line # 2 lines
    t1 = time.clock()
    header = ["value", "flags", "source", "evidence_type", "creation_time", "time", "useby", "owner", "comment"] # 3 lines
    reslist = []
    genshi_tmpl = LoadGenshiTemplate(cherrypy.session.get('cur_session'), cherrypy.session.get('username'))
    cur_component = cherrypy.session.get('cur_component')
    cur_context = cherrypy.session.get('cur_context')
    if cur_component != 'None':
        #print "getting new"
        context = cur_context.split()
        um = cherrypy.session.get('um')
        reslist = um.get_evidence_new(context, cur_component)
        cherrypy.session['cur_component'] = 'None'
    else:
        #print "getting default"
        cherrypy.session['cur_component'] = 'firstname'
        reslist = um.get_evidence_new()
    #users_list = test_data_to_list(test_data) # 4 lines
    evdlist = []
    i = 0
    #{'comment': None, 'evidence_type': 'explicit', 'creation_time': 1322914468.889158, 'value': 'Bob',
    #'source': 'Jane', 'flags': [], 'time': None, 'owner': 'Jane', 'objectType': 'Evidence', 'useby': None}
    myEvd = []
    if type(reslist) is ListType:
        for res in reslist:
            print "Inside user_json "
            myEvd = [0]*10
            myEvd[0] = i
            for key, value in res.__dict__.items():
                #print "%s:%s"%(key, value)
                for item in header:
                    if item == key:
                        #print "key: %s %s--"%(item,key)
                        if key == 'creation_time' or key == 'time' or key == 'useby':
                            if value:
                                import datetime
                                value = datetime.datetime.fromtimestamp(int(value)).strftime('%d/%m/%Y %H:%M:%S')
                        elif key == 'flags':
                            if value:
                                value = ''.join(value)
                            else:
                                value="None"
                        __index = header.index(item)
                        #print "%s in %d" %(value,__index+1)
                        myEvd[__index+1]=value
            evdlist.append(myEvd)
            i = i+1
            #print "Evidence: %d" %i
            #for val in myEvd:
            #    print val
        import my_jqGrid
        result_page = my_jqGrid.jqgrid_json(self, evdlist, header, rows=rows, sidx=sidx, _search=_search, searchField=searchField, searchOper=searchOper, searchString=searchString, page=page, sord=sord)
        t2 = time.clock()
        print 'user-json took %0.3fms' % ((t2-t1)*1000.0)
        write_log('notice','Show evidence list operation successful')
        return result_page
    else:
        #print reslist
        e = reslist
        write_log('error','Show evidence list Operation Failed; Error:'+str(e))
        modeltree = cherrypy.session.get('modeltree')
        return genshi_tmpl.greeting_template(e, "Evidencelist upload", modeltree)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getUsersData(self):\n users_data = self.get_api_results(\n \"/api/user/list?fields=id,username,name,phone,email,url,about,role,joined,lastactive,avatar,company,position,location&api_key={0}&format=json\")\n for user in users_data:\n if user[\"name\"] not in self.users:\n self.add_user(user)\n else:\n self.merge_user(user)\n return users_data", "def get_users():\n selection = []\n try:\n selection = [{'id':usr.id, 'username':usr.username, 'email':usr.email} \n for usr in User.query.all()]\n except:\n selection = {'error':True}\n return json.dumps(selection)", "def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)", "def get_users():\n return jsonify([\n users.to_dict()\n for users in models.storage.all('User').values()\n ])", "def get_all_users(session, data):\n\n all_users = None\n user_data = []\n\n page = 1\n per_page = 10\n\n email_recipients = []\n\n all_users = session.query(UsersAuthModel).all()\n\n if 'offset' in data.keys() and 'limit' in data.keys():\n page = data.get('offset')\n per_page = data.get('limit')\n\n all_users = session.query(UsersAuthModel).paginate(page=page, per_page=per_page, error_out=False).all()\n\n for user_rs in all_users:\n id_user = user_rs.user_id\n username = user_rs.user_name\n password = user_rs.password\n is_active = user_rs.is_active\n is_staff = user_rs.is_staff\n is_superuser = user_rs.is_superuser\n creation_date = user_rs.creation_date\n last_update_date = user_rs.last_update_date\n\n user_data += [{\n \"AuthUser\": {\n \"Id\": id_user,\n \"Username\": username,\n \"Password\": password,\n \"IsActive\": is_active,\n \"IsStaff\": is_staff,\n \"IsSuperuser\": is_superuser,\n \"CreationDate\": creation_date,\n \"LastUpdateDate\": last_update_date\n }\n }]\n\n if bool(is_active) and bool(is_superuser):\n email_recipients.append(username)\n\n cfg_app.email_recipients = email_recipients\n\n return json.dumps(user_data)", "def get_users():\n users = User.query # no need to order\n users_data = [user.to_dict() for user in users.all()]\n return jsonify(users=users_data)", "def get_all_users():\n users = []\n for mv in storage.all(\"User\").values():\n users.append(mv.to_dict())\n return jsonify(users)", "def all_Users():\n new_dict = []\n for usr in storage.all('User').values():\n new_dict.append(usr.to_dict())\n return jsonify(new_dict)", "def fetch_all_users():\n url = \"{}/workspace/{}/users\".format(V1_API_URL, WORKSPACE_ID)\n responses = requests.get(url, headers=HEADERS)\n return [\n {\n \"acronym\": user[\"name\"].lower(),\n \"clockify_id\": user[\"id\"],\n \"email\": user[\"email\"].lower(),\n }\n for user in responses.json()\n ]", "def json(self):\n result = {}\n for user in self.users:\n result[user.user_id] = user.json\n return result", "def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)", "def get_all_users():", "def get_all_user():\n user = UserModel.objects()\n return jsonify(user), 200", "def get_users():\n users = storage.all('User')\n users_list = []\n for user in users.values():\n users_list.append(user.to_dict())\n return jsonify(users_list), 200", "def collect_items_user_dict(self, test_data):\n items_to_fill = {}\n for row in test_data:\n user = row[0]\n item = row[1]\n if item not in items_to_fill:\n items_to_fill[item] = []\n items_to_fill[item] += [user.item()]\n\n return items_to_fill", "def get_users():\n users = User.query.order_by(User.id).all()\n users = {user.id: user.username for user in users}\n\n response = jsonify({\"success\": True, \"users\": users})\n\n 
return response", "def show_users(self):\n\n u = User(self.settings)\n users_list = u.find_users()\n\n # transform the results in a \"jsonifiable\"-form\n json_results = []\n for user in users_list:\n json_results.append(user.to_json())\n\n # return\n return json_results", "def test_fetch_all_user(self):\n\n payload = self.get_req('api/v1/users')\n self.assertEqual(payload.status_code, 200)\n self.assertEqual(payload.json['users'], [])", "def test_user_list(self):\r\n self._add_demo_import()\r\n params = {\r\n 'api_key': self.api_key\r\n }\r\n res = self.testapp.get('/api/v1/a/users/list',\r\n params=params,\r\n status=200)\r\n\r\n # we should get back dict of count, users.\r\n data = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n 1, data.get('count'), \"There are none by default. \" + res.body)\r\n self.assertEqual(\r\n 'admin',\r\n data.get('users')[0]['username'],\r\n \"The first user is from admin \" + res.body)\r\n self.assertEqual(\r\n 'testing@dummy.com',\r\n data.get('users')[0]['email'],\r\n \"The first user is from testing@dummy.com \" + res.body)", "def get_all_users(request):\n users = User.objects.all().values('id', 'first_name', 'last_name', 'username', 'email', 'is_superuser', 'is_active')\n data = []\n for user in users:\n data.append({\n 'user_id':user['id'],\n 'username':user['username'],\n 'first_name':user['first_name'],\n 'last_name':user['last_name'],\n 'is_enabled':user['is_active'],\n 'email':user['email'],\n 'is_admin':user['is_superuser']\n })\n response = {\n 'status':0,\n 'status_message':'Success',\n 'users':data\n }\n return HttpResponse(json.dumps(response))", "def user_ret():\n user_list = []\n all_objs = storage.all(\"User\")\n for obj in all_objs.values():\n user_list.append(obj.to_dict())\n return jsonify(user_list)", "def get_user_list(self, connection):\n http = get_web_service(connection)\n try:\n req = http.request('GET', connection[\"url\"] + '/users/?_format=json')\n data = json.loads(req.data.decode('utf-8'))\n # print(json.dumps(data, indent=4, sort_keys=True))\n return data\n except urllib3.exceptions.HTTPError as e:\n print(\"Connection error\")\n print(e)", "def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))", "def get_users(request):\n\n users_list = User.objects.all().values(\n 'id', 'username', 'first_name', 'last_name'\n )\n\n return HttpResponse(json.dumps(\n {'users': list(users_list)}\n ))", "def get_RegisteredUsersList(test_case, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[HeadersType], Optional[CookiesType]) -> List[Str]\n app_or_url = get_app_or_url(test_case)\n resp = test_request(app_or_url, \"GET\", \"/users\",\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n json_body = check_response_basic_info(resp, 200, expected_method=\"GET\")\n return json_body[\"user_names\"]", "def all_users():\n # TODO: implement pagination\n # skip = flask.request.args[\"skip\"]\n user_cursor = mongo.db.users.find({}, {\"_id\": False})\n users = []\n for user in user_cursor:\n # pprint(user)\n if \"image_ids\" in user:\n users.append({\n \"username\": user[\"username\"],\n \"images\": [str(im_id) for im_id in user[\"image_ids\"]]\n })\n return flask.jsonify(users)", "def get_users():\r\n page = request.args.get('page', 1, type=int)\r\n per_page = min(request.args.get('per_page', 10, type=int), 100)\r\n data = User.to_collection_dict(User.query, 
page, per_page, 'api.get_users')\r\n return jsonify(data)", "def get_users():\n users = models.User.query.all()\n friends_json = []\n for u in users:\n user = {\n 'id': u.id,\n 'name': u.name,\n 'email': u.email,\n 'regID': u.regid,\n 'photo': u.photo\n }\n friends_json.append(user)\n return jsonify({'users': friends_json}), 200", "def users_view():\n data = get_data()\n return [{'user_id': i, 'name': 'User {0}'.format(str(i))}\n for i in data.keys()]", "def users_get(self) -> Dict[str, list]:\n self.__logger.debug('Eva.users_get called')\n return self.__http_client.users_get()", "def load_users(self):\n return self.um.read_json(\"users.json\")", "def retrieve_users(payload):\n selection = User.query.order_by(User.id).all()\n users = []\n for item in selection:\n formatted_user = item.format()\n users.append(formatted_user)\n\n return jsonify({\n 'success': True,\n 'total': len(users),\n 'users': users\n })", "def userJSON():\n user = session.query(User).all()\n result = []\n\n for i in user:\n result += [i.serialize]\n\n return jsonify(Users=result)", "def get_users(self):\n query = \"\"\"SELECT firstname,lastname,othernames,email,phonenumber,\\\n username,public_id,isadmin,isactive,registered\\\n from users ORDER BY registered ASC\"\"\"\n conn = self.db\n cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows", "def search_users(self):\n for name in self.userlist:\n dict_json = {}\n x = 0\n users = api.GetUsersSearch(term=name)\n for user in users:\n id_number = \"ID=\" + str(user.id)\n screen_name = \"ScreenName=\" + user.screen_name\n json_str = json.JSONEncoder().encode({\"User\": [id_number, screen_name]})\n dict_json[x] = json_str\n x += 1\n with open(\"Different\" + name + \".json\", \"w\") as outfile:\n json.dump(dict_json, outfile)\n outfile.close()", "def view_users(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users\")\n res = cur.fetchall()\n user_list=[]\n for user in res:\n user_det = {\n 'user_id':user[0],\n 'username':user[1],\n 'password':user[2],\n 'confirmpass':user[3],\n 'addres':user[4],\n 'role':user[5]\n }\n user_list.append(user_det)\n return jsonify({'Users': user_list}), 200", "def list_users():\n return json_response(\n status=200,\n response_data={\n \"success\": True,\n \"data\": {\n \"users\": [user.serialize() for user in User.all()]\n }\n }\n )", "def get_users():\n users = User.query.all()\n users_schema = UserSchema()\n result = users_schema.dump(users, many=True)\n return jsonify({'users': result.data})", "def test_get_all_users(self):\n\n email1 = \"pytest_get_user@example.com\"\n self.create_example_user(email1)\n\n email2 = \"pytest_get_user_2@example.com\"\n\n self.create_example_user(email2)\n\n users_get_endpoint_result = user.fetchall(self.database)\n\n verify_query = \"\"\"\n SELECT * FROM USERS;\"\"\"\n self.database.cursor.execute(verify_query)\n\n verify_rows = [r._asdict() for r in self.database.cursor.fetchall()]\n\n assert len(verify_rows) == len(users_get_endpoint_result)\n\n for (email, name, group_name, hashed_password, admin) in [\n (r[\"email\"], r[\"name\"], r[\"group_name\"], r[\"hashed_password\"], r[\"admin\"])\n for r in users_get_endpoint_result\n ]:\n\n self.verify_user_data(email, name, group_name, hashed_password, admin)", "def temp_users():\n temp_user_keys = list(self.redis.scan_iter(self.temp_user_search))\n\n temp_user_data = []\n\n for user_key in temp_user_keys:\n username = user_key.split(':')[1]\n\n user = 
self.user_manager.all_users[username]\n if not user or not user.get_prop('created_at'):\n continue\n\n temp_user_data.append(user.serialize())\n\n return {'users': temp_user_data}", "def users(self):\n return json.loads(self._cache.get(self._key))", "def test_get_users(self):\n print('(' + self.test_get_users.__name__+')',\n self.test_get_users.__doc__)\n users = self.connection.get_users()\n # Check we get right size of users table\n self.assertEqual(len(users), INITIAL_USERS_COUNT)\n # check PATIENT and DOCTOR data with users object we got\n for user in users:\n if user['username'] == PATIENT_USERNAME:\n self.assertDictContainsSubset(user, PATIENT['public_profile'])\n elif user['username'] == DOCTOR_USERNAME:\n self.assertDictContainsSubset(user, DOCTOR['public_profile'])", "def get_users(self) -> List[Dict[str, Any]]:\n users = self.user_manager.get_users()\n return [\n {\n 'user_id': user.user_id,\n 'username': user.username,\n 'created_at': user.created_at.isoformat(),\n }\n for user in users\n ]", "def test_get_all_users(self):\n created_30_days_ago = datetime.datetime.utcnow() + datetime.timedelta(-30)\n add_user('neilb', 'neilb14@mailinator.com', 'password123', created_30_days_ago)\n add_user('juneau', 'juneau@mailinator.com')\n with self.client:\n response = self.client.get('/users')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['data']['users']),2)\n self.assertTrue('created_at' in data['data']['users'][0])\n self.assertTrue('created_at' in data['data']['users'][1])\n self.assertIn('juneau', data['data']['users'][0]['username'])\n self.assertIn('neilb', data['data']['users'][1]['username'])\n self.assertIn('success', data['status'])", "def display_users():\n users = storage.all(\"User\").values()\n users_list = []\n for obj in users:\n users_list.append(obj.to_dict())\n return jsonify(users_list)", "def users_view():\n users = get_users()\n data = get_data()\n result = [{'user_id': i, 'name': users[i]}\n for i in users.keys() if int(i) in data.keys()]\n #import pdb; pdb.set_trace()\n result.sort(key=lambda item: item['name'], cmp=locale.strcoll)\n return result", "def list_users():\n return jsonify(user=\"joe\")", "def get_users():\n cache_key = 'GRAHAM_API_CACHED_USERS'\n cached = rd.get(cache_key)\n if cached is not None:\n return jsonify(json.loads(cached.decode('utf-8')))\n ret = []\n for user in User.select():\n ret.append({\n 'discord_id':user.user_id,\n 'user_name':user.user_name,\n 'created_ts': format_js_iso(user.created),\n 'address':user.wallet_address\n })\n rd.set(cache_key, json.dumps(ret), ex=600)\n return jsonify(ret)", "def getAllUsers(self):\n ret = []\n\n users = User.getAll()\n\n for e in users:\n ret.append(e.getAsDict())\n\n return {\"users\" : ret}", "def user_list():\n users = User.objects.all()\n return {\"users\": users}", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def show_users():\n users_list = []\n all_users = storage.all('User')\n for obj in all_users.values():\n users_list.append(obj.to_dict())\n return jsonify(users_list)", "def users_no_id_get():\n all_users = []\n for user in storage.all(\"User\").values():\n all_users.append(user.to_dict())\n return jsonify(all_users)", "def test_get_users(self):\n pass", "def get(self):\n\n users = [marshal(user, user_fields) for user in models.ExerciseUser.select()]\n\n return users", "def read_all():\n # Create the list of users from our data\n users = 
User.query.order_by(User.first_name).all()\n\n # Serialize the data for the response\n user_schema = UserSchema(many=True)\n data = user_schema.dump(users)\n return data", "def test_get_all_users(self):\n api.user.create(\n username='chuck',\n email='chuck@norris.org',\n password='secret',\n )\n users = [user.getUserName() for user in api.user.get_users()]\n\n self.assertEqual(users, ['chuck', TEST_USER_NAME])", "def test_001_get_users(self, mock_db_query):\n mock_db_query.all.return_value = [seller1, seller2]\n\n response = self.app.get('/v1/users', headers={'accept': 'application/json'})\n\n print(response.get_data().decode())\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json), 2)", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id':u.id, 'admin':u.admin})\n return { 'users' : usersJSON }", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "def users(self, per_page=None, page=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'users')\r\n return http.Request('GET', url, params), parsers.parse_json", "def users(self):\n return self.get_data(\"users\")", "def test_listuser():\n url = baseUrl + userurl + listurl\n logging.info(\"List users\")\n r = requests.get(url, headers=header)\n assert r.status_code == 200\n resp = r.json()\n global user_ids\n user_ids = []\n if resp is None:\n pass\n else:\n user_num = len(resp)\n for k in range(0, user_num):\n assert resp[k]['subscriptionIds'][0] == subscriptionid\n if resp[k][\"isActive\"] is True:\n user_ids.append(resp[k][\"id\"])\n print (user_ids)\n assert user_id in user_ids", "def _list_users(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows:\")\n for i in users:\n print(users[i][\"name\"])\n self._list_user_settings(users)", "def get_users():\n with open(user_path, \"r\") as infile:\n return json.load(infile)", "def get_all_users():\n all_users = User.query.all()\n \n list_of_all_users = []\n for user in all_users:\n list_of_all_users.append({\n 'user_id' : user.user_id,\n 'username' : user.username,\n 'fname' : user.fname,\n 'lname' : user.lname,\n 'total_articles' : len(user.saved_news)\n })\n \n return list_of_all_users", "def db_users():\n return [\n {\"name\": \"Cathy\", \"email\": \"cathy@\", \"group\": \"guest\", \"password\": \"12345\"},\n {\"name\": \"Marry\", \"email\": \"marry@\", \"group\": \"guest\", \"password\": \"12345\"},\n {\"name\": \"John\", \"email\": \"john@\", \"group\": \"guest\", \"password\": \"12345\"},\n ]", "def get_users_info(): \n \n data = user_obj.get_users_info()\n return data", "def get_users(self):\n # remove some user media fields that we can't submit back\n def clean_media(entry):\n entry.pop(\"mediaid\", None)\n entry.pop(\"userid\", None)\n entry.pop(\"description\", None)\n return entry\n zabbix_users = self.conn.user.get(selectMedias=\"extend\", selectUsrgrps=\"extend\")\n zabbix_users = {user[\"alias\"].lower(): User(\n id=user[\"userid\"],\n name=user[\"name\"],\n surname=user[\"surname\"],\n alias=user[\"alias\"],\n groups=set(g[\"usrgrpid\"] for g in user[\"usrgrps\"]),\n media=[clean_media(entry) for entry in user[\"medias\"]],\n ) for user in zabbix_users}\n return zabbix_users", "def users_list():\n users = User.query.all()\n serialized_objects = users_schema.dumps(users, sort_keys=True, indent=4)\n\n return Response(\n response=serialized_objects,\n status=http_status.OK,\n 
mimetype=\"application/json\"\n )", "def _get_userlist_by_userright(self, userright):\n params = {\n \"action\": \"query\",\n \"list\": \"allusers\",\n \"format\": \"json\",\n \"augroup\": userright,\n \"aulimit\": \"500\",\n }\n r = self.session.get(ENWIKI_API, params=params)\n data = r.json()\n return [u[\"name\"] for u in data[\"query\"][\"allusers\"]]", "def getactiveusersdata(self, startdate, enddate):\n try:\n\n select_activeusers = (\n \"SELECT DISTINCT username FROM public.jobs \"\n \"WHERE latestjobversion = True AND insertdate BETWEEN Date(%s) AND Date(%s) \"\n \"AND username NOT IN ('amr','Amr.Hassan@swin.edu.au','yfenner','luke','ldeslandes') \"\n )\n\n self.pgcursor.execute(select_activeusers, (startdate, enddate))\n\n activeusers = {}\n usernames = self.pgcursor.fetchall()\n\n select_userdata = (\n \"SELECT gender, institution, country, is_student FROM tao_taouser \"\n \"WHERE username = %s \"\n )\n\n for (username) in usernames:\n\n self.mysqlcursor.execute(select_userdata, username)\n\n userdata = self.mysqlcursor.fetchone()\n if userdata is not None:\n # for (gender, institution, country, is_student) in userdata:\n # print(\"{0},{1},{2},{3},{4}\".format(username, gender, institution, country, is_student))\n print(\"{0},{1},{2},{3},{4}\".format(username[0], userdata[0], userdata[1], userdata[2], userdata[3]))\n\n except Exception as exp:\n raise (exp)\n finally:\n self.finalize()", "def get_every_user():\r\n connect(\"mongodb://vcm-3594.vm.duke.edu:27017/heart_rate_app\")\r\n user_list = get_all_users()\r\n return_dict = {\r\n \"user_emails\": user_list\r\n }\r\n return jsonify(return_dict),200", "def _get_users_list(self):\n return self.users['user_id'].tolist()", "def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def user_list(request):\n if request.method == 'GET':\n user_info = UserData.objects.all()\n serializer = UserProfileSerializer(user_info, many=True)\n return JSONResponse(serializer.data)\n else:\n return JSONResponse('Using wrong api.', status=404)", "def get(self):\n\n users = UserModel.get_top_earners()\n users_json = [user.json() for user in users]\n return {\"users\": users_json}", "def getInterestedUsers():", "def parse_users(self, users_json):\n users = []\n for user_json in users_json:\n users.append(User(user_json))\n return users", "def list():\n try:\n data = table.scan()\n users = data.get('Items', None)\n if users is None:\n return jsonify({'error': 'Error fetching users'}), 400\n\n resp = {\n 'count': len(users),\n 'users': users\n }\n return jsonify(resp)\n except BaseException as e:\n logger.info('ERROR {}'.format(str(e)))\n return jsonify({'error': str(e)}), 400", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id': u.id, 'admin': u.admin})\n return {'users': usersJSON}", "def test_api_can_get_all_users(self):\n response = self.client().get('/api/v1/user/')\n self.assertTrue(response.status_code, 200)", "def all_users(request):\r\n user = User()\r\n return HttpResponse(json.dumps(user.parseFile()))", "def getBatchedUsersJSON(self, u_ids):\n users = self.api.lookup_users(user_ids=[','.join([str(x) for x in u_ids])])\n return [user._json for user in users]", "def user_for_test(db_setup):\n db_setup.cur.execute(\n \"\"\"\n SELECT row_to_json(u) \n FROM( SELECT id, first_name, last_name, username FROM users WHERE username = %s 
LIMIT 1)\n u\n \"\"\", (\"test123\",))\n test_user = db_setup.cur.fetchone()[0]\n return test_user", "def list_users(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/users\"\n _body = None\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \"get user list Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"users List : %s\")\n return output[\"users\"]", "def get_users():\n username = request.args.get('username')\n netAdminToolDB = app.config['DATABASE']\n if username != None:\n users = []\n users.append(netAdminToolDB.get_user_name(username))\n else:\n users = netAdminToolDB.get_user()\n\n userList = []\n for user in users:\n uri = url_for('get_user', user_id=user.id,_external=True)\n #role = netAdminToolDB.get_role(user.role_id)\n userList.append({\n 'id': user.id,\n 'uri': uri,\n 'username': user.username,\n 'display_name': user.display_name,\n 'role': user.role_name\n })\n if userList == []:\n return jsonify({'error': 'No users found'}), 404\n\n return jsonify({'users': userList })", "def load_users(self):\n logging.debug(\"Loading users data...\")\n\n # loading videos\n data=requests.get(self.__URL_USERS)\n self.__dataframe_users=pd.DataFrame(data.json())\n\n logging.debug(\"Users data loaded! n=%s\" % self.__dataframe_users.shape[0])\n\n return self.__dataframe_users", "def get_users(self, params=None):\n url = 'users'\n if params:\n url += '?%s' % urllib.urlencode(params)\n resp, body = self.get(url)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['users'])", "def users(self, query, page=1, per_page=10):\n url = \"/search/users\"\n data = self._search(url, query, page=page, per_page=per_page)\n data[\"results\"] = UserModel.parse_list(data.get(\"results\"))\n return data", "def get_persons(self):\n response = self.do_request('/misc/user/export/json')\n if response:\n return response.json()", "def list_users():\n if not check_content_type():\n return jsonify(status=CONTENT_TYPE_ERROR)\n reqdata = request.json\n if not check_token(reqdata[\"token\"]):\n return jsonify(status=TOKEN_ERROR)\n users = db.session.query(User).all()\n resdata = []\n for user in users:\n resdata.append({\"id\" : user.id, \"login\" : user.login, \"password\" : user.hash_password})\n return jsonify(data=resdata, status=OK_STATUS)", "def user_list(request):\r\n params = request.params\r\n order = params.get('order', None)\r\n limit = params.get('limit', None)\r\n user_list = UserMgr.get_list(order=order, limit=limit)\r\n ret = {\r\n 'count': len(user_list),\r\n 'users': [dict(h) for h in user_list],\r\n }\r\n return _api_response(request, ret)", "def test_getUsers(self):\n\t\turl = \"/users/\"\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)\n\t\tself.assertEqual(response.data[\"count\"], 2)", "def user_list(request_dict):\n users = User.query.all()\n users_list = list()\n for user in users:\n users_list.append(user)\n\n return JSONTools.user_list_reply(users_list)", "def get_all_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query().fetch()]\n message: str = 'successfully 
retrieved active users'\n return jsonify({'status': True, 'payload': users_list, 'message': message}), 200", "def get_list(self, per_page=100):\n url = \"{0}/users\".format(self.base_url)\n url = self._add_token_to_url(url)\n page_current = 0\n users_list = None\n\n self.session.headers.update({\"Content-Type\": \"application/json\"})\n\n while True:\n payload = {\"per_page\": per_page, \"page\": page_current + 1}\n user_list_response = self.session.get(url, params=payload, verify=False).json()\n # user_list_response = self.session.get(url, data=json.dumps(payload), verify=False).json()\n page_total = user_list_response['pagination']['total']\n page_current = user_list_response['pagination']['page']\n if users_list:\n users_list.extend(user_list_response['response'])\n else:\n users_list = user_list_response['response']\n if page_total == page_current:\n break\n return users_list", "def get_users(self):\n res = self.conn.cursor().execute('SELECT id,email,username FROM users')\n return res.fetchall()", "def get_users():\n\n return User.query.all() # [<User user_id=1 fname=Alice lname=Apple>]", "def _load_users(self) -> List[Dict]:\n try:\n api_call = self.web_client.api_call('users.list')\n if api_call.get('ok'):\n return api_call.get('members')\n except Exception:\n LOGGER.exception('Cannot get users')\n raise" ]
[ "0.7094494", "0.69843435", "0.69711816", "0.6943046", "0.6916682", "0.68569654", "0.6854189", "0.68422705", "0.68375057", "0.6822268", "0.6778825", "0.67571205", "0.67559546", "0.67233765", "0.67228365", "0.6666873", "0.66579914", "0.664822", "0.6617304", "0.6607768", "0.6572221", "0.6569828", "0.65564376", "0.6539796", "0.6527385", "0.65173435", "0.6470264", "0.64683205", "0.64642525", "0.6463055", "0.6461183", "0.64586014", "0.64438206", "0.643466", "0.64220595", "0.641419", "0.6398037", "0.63980323", "0.63858414", "0.6361524", "0.63560545", "0.6346467", "0.63347286", "0.63345397", "0.63192725", "0.6306116", "0.62925", "0.62890834", "0.62855077", "0.6282404", "0.6273717", "0.62637174", "0.62622017", "0.6262133", "0.62397665", "0.6238015", "0.6235895", "0.62070036", "0.6199621", "0.6196288", "0.6184665", "0.617104", "0.6164863", "0.61396086", "0.61358833", "0.6135162", "0.6134738", "0.6133293", "0.6125281", "0.6111319", "0.61072314", "0.61068654", "0.6105808", "0.6098374", "0.6068555", "0.6059895", "0.605171", "0.6046763", "0.60422033", "0.6041025", "0.60371184", "0.6030111", "0.6026352", "0.6021257", "0.60197836", "0.60170877", "0.60084486", "0.6008341", "0.60067016", "0.599799", "0.59924126", "0.59792006", "0.59777105", "0.59665495", "0.5966458", "0.5945917", "0.594405", "0.59411776", "0.59357184", "0.5933488" ]
0.6233058
57
Main driver, parse the feed and output the JSON list of sources
def main():
    handler = PlanetSourceHandler()
    parser = xml.sax.make_parser()
    parser.setFeature(xml.sax.handler.feature_namespaces, 1)
    parser.setContentHandler(handler)
    parser.parse(sys.stdin)
    print simplejson.JSONEncoder(indent=True).encode(handler.sources)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_sources_all():\n response = requests.get(SOURCE_URL)\n json = response.json()\n for source in json['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\", source['id'], source['name']))", "def listsources():\n\tmain_url = \" https://newsapi.org/v2/sources?apiKey=5f81b593f35d42a8980313250c03d7e7\"\n\n\t# fetching data in json format \n\topen_source = requests.get(main_url).json() \n\n\t# getting all articles in a string sources\n\tsource = open_source[\"sources\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n\tresults = [] \n\t\n\tfor k in source: \n results.append(k[\"id\"])\n \n \t\n\tfor w in results[0:4]:\n print(w)", "def main(feed=None):\n feed_processor = core.FeedProcessor()\n feed_processor(feed_type=feed)\n return feed_processor.feed_json", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources\")\n print(sources)\n return sources", "def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources:\")\n print(sources)\n return sources", "def main():\n os.makedirs(PATH)\n fetch_data()\n convert_to_json(model_list, 'models.json', is_model=True)\n convert_to_json(backend_list, 'backends.json')\n convert_to_json(type_list, 'types.json')\n convert_to_json(featurizer_list, 'featurizers.json')", "def main():\n\n ## Deal with incoming.\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('-w', '--walk',\n help='Combined report JSON file')\n parser.add_argument('-r', '--remote',\n help='The remote server to map onto')\n parser.add_argument('-o', '--output',\n help='The file to dump the JSON output to')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='More verbose output')\n args = parser.parse_args()\n\n if args.verbose:\n LOG.setLevel(logging.INFO)\n LOG.info('Verbose: on')\n\n ## Ensure directory.\n if not args.walk:\n die_screaming('need a \"walk\" argument')\n LOG.info('Will walk directory: ' + args.walk)\n ## Ensure URL.\n if not args.remote:\n die_screaming('need \"remote\" argument')\n LOG.info('Will map unto URL: ' + args.remote)\n ## Ensure output file.\n if not args.output:\n die_screaming('need an output file argument')\n LOG.info('Will output to: ' + args.output)\n\n ## Walk tree.\n lookup = [];\n for curr_dir, dirs, files in os.walk(args.walk):\n\n ## We can navigate up if we are not in the root.\n relative_to_start = curr_dir.rstrip('//')[len(args.walk):]\n relative_to_start = relative_to_start.lstrip('//')\n LOG.info('curr_dir: ' + curr_dir + ' (' + relative_to_start + ')')\n\n ## Note files and directories.\n for fname in files:\n\n ## Figure out S3 path/key and final filename, keeping in\n ## mind that relative_to_Start can be empty if root.\n webpath = fname\n if relative_to_start:\n webpath = relative_to_start + '/' + fname\n filename = os.path.join(curr_dir, fname)\n\n size = os.path.getsize(filename)\n md5sum = hashlib.md5(open(filename, 'rb').read()).hexdigest()\n\n ## Visual check.\n LOG.info('file: '+ filename + ' -> [' + args.remote + '] / ' + webpath + '; ' + str(size) + ', ' + md5sum)\n\n lookup.append({\n 'url': args.remote + '/' + webpath,\n 'length': str(size),\n 'filename': 
webpath,\n 'md5': md5sum\n })\n\n ## Final writeout.\n output = json.dumps(lookup, sort_keys=True, indent=4)\n with open(args.output, 'w+') as fhandle:\n fhandle.write(output)\n LOG.info(output)", "def main():\n if len(sys.argv) != 3:\n print(\"Usage: python season-get [year] [season, lowercase]\")\n return None\n\n year = sys.argv[1]\n season = sys.argv[2]\n print(\"Pulling from API\")\n\n API_url = \"http://api.jikan.moe/season/{}/{}\".format(year, season)\n\n # Error checking\n try:\n data = urllib.request.urlopen(API_url)\n except urllib.error.URLError as e:\n print(\"Error - {}\".format(e))\n return None\n print(\"Complete!\\n\")\n\n result = json.loads(data.read())['season']\n\n print(\"Loading IDs\")\n target_name = '{}-{}.txt'.format(year, season)\n target = open('./input/{}'.format(target_name), 'w')\n\n for anime in result:\n target.write(str(anime['mal_id']) + '\\n')\n print(\"Complete!\\n\")", "def main():\n print('<<< Spotify Parser >>>')\n print('-'*101)\n spotify_api.get_json_file(JSON_TRACK_FILE, TRACK, 'track', CLIENT_ID, CLIENT_SECRET)\n parse_json_track(JSON_TRACK_FILE)\n print('-'*101)\n spotify_api.get_json_file(JSON_ARTIST_FILE, ARTIST, 'artist', CLIENT_ID, CLIENT_SECRET)\n parse_json_artist(JSON_ARTIST_FILE)\n print('-'*101)", "def cmd_runner():\n parser = argparse.ArgumentParser(description=\"Get news from extracted sources\")\n parser.add_argument('-s', '--source', help=\"Name of the news source. Available options are: {}\"\n .format(', '.join(\n available_rss_feed_sources_user_prompt\n )\n ))\n parser.add_argument('-t', '--top', help=\"Count of news items to show from top order. Default: 10\")\n parser.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=__version__))\n\n args = parser.parse_args()\n \n rss_feed_top_count = RSS_FEED_TOP_COUNT\n if args.top:\n if not args.top.isnumeric():\n raise argparse.ArgumentTypeError('Provide a positive integer for --top argument')\n \n else:\n rss_feed_top_count = int(args.top)\n\n if args.source:\n source_name = args.source.lower()\n print('Source argument found, validating...')\n time.sleep(1)\n source_name_is_valid = source_name in available_rss_feed_sources\n \n if not source_name_is_valid:\n print(\n 'Invalid input {0} for source. 
Available sources are {1} '\n .format(source_name, ', '.join(available_rss_feed_sources_user_prompt)))\n sys.exit()\n \n \n rss_feed_details = {\n 'rss_feed_url': '',\n 'top': rss_feed_top_count,\n 'source': source_name\n }\n rss_feed_url = rss_data_main._get_rss_feed_url_by_source(source_name)\n if not rss_feed_url:\n raise ValueError('RSS url not found for source {}'.format(source_name))\n \n rss_feed_details['rss_feed_url'] = rss_feed_url\n\n \n rss_feed_details['top'] = rss_feed_top_count\n \n print('Getting news...\\n\\n')\n time.sleep(1)\n \n feed_data = rss_feed_main._get(rss_feed_details)\n feed_viewer(feed_data)\n else:\n print('--source argument not provided, getting data for all available sources...')\n rss_feed_details_list = []\n for source_name in available_rss_feed_sources:\n rss_feed_details = {\n 'rss_feed_url': '',\n 'source': source_name,\n 'top': rss_feed_top_count\n }\n rss_feed_url = rss_data_main._get_rss_feed_url_by_source(source_name)\n if not rss_feed_url:\n raise ValueError('RSS url not found for source {}'.format(source_name))\n \n rss_feed_details['rss_feed_url'] = rss_feed_url\n rss_feed_details_list.append(rss_feed_details)\n \n with Pool(AVIALABLE_CPU_COUNT) as p:\n feed_data_list = p.map(rss_feed_main._get, rss_feed_details_list) \n # put list of lists in a single list\n feed_data_flattened_list = []\n for feed_data in feed_data_list:\n feed_data_flattened_list.extend(feed_data) \n\n feed_viewer(feed_data_flattened_list)", "def _parse_sources(self, item):\n return [{'url': item['url']}]", "def main(unused_argv):\n make_dir(FLAGS.raw_dir)\n\n # Get paths of download/extracted training and evaluation files.\n print(\"Downloading data from source\")\n train_files = get_raw_files(FLAGS.raw_dir, constants.TRAIN_DATA_SOURCES)\n eval_files = get_raw_files(FLAGS.raw_dir, constants.EVAL_DATA_SOURCES)", "def main():\n args = get_args()\n\n urls = get_urls(args)\n\n if len(urls) < 1:\n print >> sys.stderr, render_date_iso8601(), \"No valid URLs to process.\"\n exit(1)\n\n outfile = None\n\n if args.output:\n try:\n outfile = open(args.output, 'a' if args.append == True else 'w')\n except IOError as error:\n print >> sys.stderr, render_date_iso8601(), \"Error: opening file:\", args.output, \">>\", url\n\n\n for url in urls:\n if verify_url(url) == True:\n try:\n m3u8_obj = m3u8.load(url)\n if m3u8_obj.is_variant:\n print >> sys.stdout if outfile is None else outfile, render_csv(url, m3u8_obj.playlists)\n else:\n print >> sys.stderr, render_date_iso8601(), \"Error for url:\", url, \"Doesn't contain any stream playlists\"\n except IOError as error:\n print >> sys.stderr, render_date_iso8601(), \"Error for url:\", url, \">>\", error\n else:\n print >> sys.stderr, render_date_iso8601(), \"Error: Not a valid URL >>\", url\n\n\n return 0", "def main(args):\n\n if args['verbose']:\n logging.basicConfig(level=logging.DEBUG)\n else:\n if args['quiet']:\n logging.basicConfig(level=logging.ERROR)\n else:\n logging.basicConfig(level=logging.WARNING)\n\n # unpack args\n\n json_file = args['JSONfile']\n data_dir = args['data_directory']\n temp_file = args['tmp']\n release = args['release']\n\n if json_file:\n json_data = get_json_data(json_file)\n else:\n logging.log(logging.DEBUG, \"Preparing to download JSONfile\")\n if os.path.isfile(temp_file):\n logging.log(logging.WARNING, \"Removing file %s\" % temp_file)\n os.remove(temp_file)\n logging.log(logging.DEBUG, \"Issuing wget for JSON file\")\n args = ['wget', 'https://security-tracker.debian.org/tracker/data/json',\n '-O', 
temp_file]\n if os.path.isdir('/etc/ssl'):\n if os.path.isdir('/etc/ssl/ca-debian'):\n args.insert(1, '--ca-directory=/etc/ssl/ca-debian')\n call(args)\n logging.log(logging.DEBUG, \"File %s received\" % temp_file)\n json_data = get_json_data(temp_file)\n if os.path.isfile(temp_file):\n logging.log(logging.DEBUG, \"Removing file %s\" % temp_file)\n os.remove(temp_file)\n\n parseJSON(json_data, release)\n parsedirs(data_dir, re.compile('^dsa.+\\.data$'), 2, release)\n parsedirs(data_dir, re.compile('^dla.+\\.data$'), 2, release)\n logging.log(logging.INFO, \"Finished parsing JSON data\")\n printdsas(ovals)", "def collect_data():\n mapping = {'nginx': Nginx,\n 'apache': Apache,\n 'server': Server,\n 'buildout': Buildout}\n with utils.cd(utils.displayer_dir()):\n for dirpath, dirnames, filenames in os.walk('.'):\n # server_id = dirpath\n for json_file in [f for f in filenames if f.endswith('.json')]:\n kind = json_file.split('___')[0]\n filepath = os.path.join(dirpath, json_file)\n logger.debug(\"Loading info from %s\",\n os.path.abspath(filepath))\n json_content = open(filepath).read()\n klass = mapping[kind]\n obj = klass(json_content)\n data[kind][obj.id.lower()] = obj\n # Link buildouts and nginx sites.\n for nginx in data['nginx'].values():\n buildout_id = nginx.data.get('buildout_id')\n if buildout_id is not None:\n buildout = data['buildout'].get(buildout_id)\n if buildout is not None:\n nginx.buildout = buildout\n buildout.site = nginx\n # Link buildouts and apache sites.\n for apache in data['apache'].values():\n buildout_id = apache.data.get('buildout_id')\n if buildout_id is not None:\n buildout = data['buildout'].get(buildout_id)\n if buildout is not None:\n apache.buildout = buildout\n buildout.site = apache\n # Link buildouts+sites with servers.\n for kind in ['nginx', 'apache', 'buildout']:\n for obj in data[kind].values():\n hostname = obj.data.get('hostname')\n if hostname is not None:\n hostname = hostname.lower()\n server = data['server'].get(hostname)\n if server is None:\n logger.error(\"Server with hostname %s not found.\",\n hostname)\n else:\n obj.server = server\n if kind == 'nginx' or kind == 'apache':\n server.sites.append(obj)\n elif kind == 'buildout':\n server.buildouts.append(obj)\n # Link nginx gunicorn ports with servers.\n for kind in ['nginx']:\n for obj in data[kind].values():\n hostname = obj.data.get('hostname')\n port = obj.data.get('proxy_port')\n try:\n port = int(port)\n except:\n pass\n if hostname is not None and port is not None:\n hostname = hostname.lower()\n server = data['server'].get(hostname)\n if server is None:\n logger.error(\"Server with hostname %s not found.\",\n hostname)\n continue\n server.ports[port] = obj", "def _load_sources(self):\n ss_dir = SteelScriptDir('AppResponse', 'files')\n\n for svc in [PACKETS_REPORT_SERVICE_NAME,\n GENERAL_REPORT_SERVICE_NAME]:\n svc_version = self.appresponse.versions[svc]\n sw_version = (self.appresponse.get_info()['sw_version']\n .replace(' ', ''))\n sources_filename = ('{}-sources-{}-{}.pcl'\n .format(svc, svc_version, sw_version))\n sources_file = ss_dir.get_data(sources_filename)\n\n sources_file.read()\n\n if not sources_file.data:\n svcdef = self.appresponse.find_service(svc)\n\n # sources is a list of dictionaries\n sources = svcdef.bind('sources').execute('get').data['items']\n\n # the whole set of sources for current service\n all_sources = {}\n\n for source in sources:\n cols = source['columns']\n source['columns'] = \\\n OrderedDict(sorted(zip(map(lambda x: x['id'], cols),\n cols)))\n 
source['filters_on_metrics'] = \\\n source['capabilities']['filters_on_metrics']\n if 'granularities' not in source:\n source['granularities'] = None\n\n all_sources[source['name']] = source\n\n if source['name'] in report_source_to_groups:\n self._sources[source['name']] = source\n\n # source_file writes the whole set of sources to disk\n sources_file.data = all_sources\n sources_file.write()\n logger.debug(\"Wrote sources data into {}\"\n .format(sources_filename))\n else:\n logger.debug(\"Loading sources data from {}\"\n .format(sources_filename))\n # Only load valid sources based on settings\n for k, v in sources_file.data.iteritems():\n if k in report_source_to_groups:\n self._sources[k] = v\n\n return", "def get_source(self, results):\n if results:\n json_results = [book['_source'] for book in results]\n return json_results", "def main():\n if config.command == \"list-groups\":\n # Get the list of policies in JSON format for the given network\n if hasattr(config, 'accountSwitchKey'):\n groupList = listGroups(config.accountSwitchKey)\n else:\n groupList = listGroups()\n formatOutputGroupList(groupList, config.output_type)\n\n elif config.command == \"list-connectors\":\n if hasattr(config, 'accountSwitchKey'):\n connectorList = listConnectors(config.accountSwitchKey)\n else:\n connectorList = listConnectors()\n formatOutputConnectorList(connectorList, config.output_type)\n\n elif config.command == \"list-products\":\n if hasattr(config, 'accountSwitchKey'):\n productsList = listProducts(config.accountSwitchKey)\n else:\n productsList = listProducts()\n formatOutputProductList(productsList, config.output_type)\n\n elif config.command == \"list-stream-types\":\n if hasattr(config, 'accountSwitchKey'):\n streamTypeList = listStreamTypes(config.accountSwitchKey)\n else:\n streamTypeList = listStreamTypes()\n formatOutputStreamTypeList(streamTypeList, config.output_type)\n\n elif config.command == \"list-streams\":\n if hasattr(config, 'accountSwitchKey'):\n streamList = listStreams(config.groupid,config.streamstatus,config.accountSwitchKey)\n else:\n streamList = listStreams(config.groupid,config.streamstatus)\n formatOutputStreamList(streamList, config.output_type)\n\n elif config.command == \"list-properties\":\n if hasattr(config, 'accountSwitchKey'):\n propertiesList = listProperties(config.groupid,config.productId,config.accountSwitchKey)\n else:\n propertiesList = listProperties(config.groupid,config.productId)\n formatOutputPropertiesList(propertiesList, config.output_type)\n\n elif config.command == \"list-error-streams\":\n if hasattr(config, 'accountSwitchKey'):\n errorstreamList = listErrorStreams(config.groupid,config.accountSwitchKey)\n else:\n errorstreamList = listErrorStreams(config.groupid)\n formatOutputErrorStreamList(errorstreamList, config.output_type)\n\n elif config.command == \"create\":\n # Opening JSON file\n f = open(config.file.name,'r')\n data = json.load(f)\n json_string = json.dumps(data) #Very Important since when you read it will be in single quotes, it need to be dumped to json and strings are only valid only in double quotes\n\n if hasattr(config, 'accountSwitchKey'):\n createResponse = createStream(json_string,config.accountSwitchKey)\n else:\n createResponse = createStream(json_string)\n formatOutputActDeactResp(createResponse)\n\n elif config.command == \"update\":\n # Opening JSON file\n f = open(config.file.name,'r')\n data = json.load(f)\n json_string = json.dumps(data) #Very Important since when you read it will be in single quotes, it need to be 
dumped to json and strings are only valid only in double quotes\n print(json_string)\n if hasattr(config, 'accountSwitchKey'):\n updateResponse = updateStream(json_string,config.streamid,config.accountSwitchKey)\n else:\n updateResponse = updateStream(json_string,config.streamid)\n formatOutputActDeactResp(updateResponse)\n\n\n elif config.command == \"get-stream\":\n if hasattr(config, 'accountSwitchKey'):\n streamDetail = getStream(config.streamid,config.accountSwitchKey)\n else:\n streamDetail = getStream(config.streamid)\n formatOutputStreamDetail(streamDetail, config.output_type)\n\n elif config.command == \"activation-history\":\n if hasattr(config, 'accountSwitchKey'):\n activationHistory = getStreamActHistory(config.streamid,config.accountSwitchKey)\n else:\n activationHistory = getStreamActHistory(config.streamid)\n formatOutputActHistory(activationHistory, config.output_type)\n\n elif config.command == \"stream-history\":\n if hasattr(config, 'accountSwitchKey'):\n streamHistory = getStreamHistory(config.streamid,config.accountSwitchKey)\n else:\n streamHistory = getStreamHistory(config.streamid)\n formatOutputStreamHistory(streamHistory, config.output_type)\n\n elif config.command == \"list-datasets\":\n if hasattr(config, 'accountSwitchKey'):\n datasetList = getDatasets(config.template,config.accountSwitchKey)\n else:\n datasetList = getDatasets(config.template)\n formatOutputDatasetList(datasetList, config.output_type)\n\n elif config.command == \"activate\":\n if hasattr(config, 'accountSwitchKey'):\n activateResponse = activateStream(config.streamid,config.accountSwitchKey)\n else:\n activateResponse = activateStream(config.streamid)\n formatOutputActDeactResp(activateResponse)\n\n elif config.command == \"deactivate\":\n if hasattr(config, 'accountSwitchKey'):\n deactivateResponse = deActivateStream(config.streamid,config.accountSwitchKey)\n else:\n deactivateResponse = deActivateStream(config.streamid)\n formatOutputActDeactResp(deactivateResponse)\n\n elif config.command == \"delete\":\n if hasattr(config, 'accountSwitchKey'):\n deleteResponse = deleteStream(config.streamid,config.accountSwitchKey)\n else:\n deleteResponse = deleteStream(config.streamid)\n formatOutputActDeactResp(deleteResponse)", "def main():\n try:\n init_file = open('keywords.json', 'r')\n init_file.close()\n except IOError:\n copy2('keywords.base', 'keywords.json')\n try:\n init_file = open('rsslist.json', 'r')\n init_file.close()\n except IOError:\n copy2('rsslist.base', 'rsslist.json')\n \n\n config_file = 'config.ini'\n config_section = 'dev'\n slack_token = load_config(config_file, config_section)\n slack_client = SlackClient(slack_token)\n feed_count = len(feed_db)\n feed_counter = feed_count\n while feed_counter > 0:\n url = feed_db.get(doc_id = feed_counter)['url']\n last_update_obj = feed_db.get(doc_id = feed_counter)['lastupdate']\n post_list, published_date = getfeed(url, last_update_obj)\n feed_counter = feed_counter - 1\n print(post_list)\n post_lastUpdate(url, published_date)\n post_to_slack(slack_client, post_list)", "def run(self, source, **kwargs):\n kwargs['output'] = self.__graph__()\n if isinstance(source, str):\n import json\n source = json.loads(source)\n self.source = source\n super(JSONProcessor, self).run(**kwargs)\n self.output = kwargs['output']\n return output", "def main():\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n get_user_config()", "def process(sources, output, force):\n logging.basicConfig(level=logging.INFO,\n 
format='%(asctime)s [%(levelname)s] - %(message)s', datefmt=\"%H:%M:%S\")\n\n logging.getLogger('shapely.geos').setLevel(logging.WARNING)\n logging.getLogger('Fiona').setLevel(logging.WARNING)\n logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\n requests.packages.urllib3.disable_warnings()\n # logging.getLogger('processing').setLevel(logging.DEBUG)\n\n catalog_features = []\n failures = []\n path_parts_to_skip = utils.get_path_parts(sources).index(\"sources\") + 1\n success = True\n for path in utils.get_files(sources):\n try:\n logging.info(\"Processing \" + path)\n pathparts = utils.get_path_parts(path)[path_parts_to_skip:]\n pathparts[-1] = pathparts[-1].replace('.json', '.geojson')\n\n outdir = os.path.join(output, *pathparts[:-1], pathparts[-1].replace('.geojson', ''))\n outfile = os.path.join(output, *pathparts)\n\n source = utils.read_json(path)\n urlfile = urlparse(source['url']).path.split('/')[-1]\n \n if not hasattr(adapters, source['filetype']):\n logging.error('Unknown filetype ' + source['filetype'])\n failures.append(path)\n continue\n \n read_existing = False\n if os.path.isfile(outfile):\n logging.info(\"Output file exists\")\n if os.path.getmtime(outfile) > os.path.getmtime(path):\n logging.info(\"Output file is up to date\")\n if not force:\n read_existing = True\n logging.warning('Skipping ' + path + ' since generated file exists. Use --force to regenerate.') \n else:\n logging.info(\"Output is outdated, {} < {}\".format(\n datetime.datetime.fromtimestamp(os.path.getmtime(outfile)),\n datetime.datetime.fromtimestamp(os.path.getmtime(path))))\n\n if read_existing:\n with open(outfile, \"rb\") as f:\n geojson = json.load(f)\n properties = geojson['properties']\n else:\n logging.info('Downloading ' + source['url'])\n \n try:\n fp = utils.download(source['url'])\n except IOError:\n logging.error('Failed to download ' + source['url'])\n failures.append(path)\n continue\n \n logging.info('Reading ' + urlfile)\n \n if 'filter' in source:\n filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))\n else:\n filterer = None\n \n try:\n geojson = getattr(adapters, source['filetype'])\\\n .read(fp, source['properties'],\n filterer=filterer,\n layer_name=source.get(\"layerName\", None),\n source_filename=source.get(\"filenameInZip\", None))\n except IOError as e:\n logging.error('Failed to read ' + urlfile + \" \" + str(e))\n failures.append(path)\n continue\n except zipfile.BadZipfile as e:\n logging.error('Unable to open zip file ' + source['url'])\n failures.append(path)\n continue\n finally:\n os.remove(fp.name)\n if(len(geojson['features'])) == 0:\n logging.error(\"Result contained no features for \" + path)\n continue\n excluded_keys = ['filetype', 'url', 'properties', 'filter', 'filenameInZip']\n properties = {k:v for k,v in list(source.items()) if k not in excluded_keys}\n properties['source_url'] = source['url']\n properties['feature_count'] = len(geojson['features'])\n logging.info(\"Generating demo point\")\n properties['demo'] = geoutils.get_demo_point(geojson)\n \n geojson['properties'] = properties\n \n utils.make_sure_path_exists(os.path.dirname(outfile))\n\n #cleanup existing generated files\n if os.path.exists(outdir):\n rmtree(outdir)\n filename_to_match, ext = os.path.splitext(pathparts[-1])\n output_file_dir = os.sep.join(utils.get_path_parts(outfile)[:-1])\n logging.info(\"looking for generated files to delete in \" + output_file_dir)\n for name in os.listdir(output_file_dir):\n base, ext = 
os.path.splitext(name)\n if base == filename_to_match:\n to_remove = os.path.join(output_file_dir, name)\n logging.info(\"Removing generated file \" + to_remove)\n os.remove(to_remove)\n\n utils.write_json(outfile, geojson)\n\n logging.info(\"Generating label points\")\n label_geojson = geoutils.get_label_points(geojson)\n label_path = outfile.replace('.geojson', '.labels.geojson')\n utils.write_json(label_path, label_geojson)\n\n logging.info('Done. Processed to ' + outfile)\n \n if not \"demo\" in properties:\n properties['demo'] = geoutils.get_demo_point(geojson)\n\n properties['path'] = \"/\".join(pathparts)\n catalog_entry = {\n 'type': 'Feature',\n 'properties': properties,\n 'geometry': geoutils.get_union(geojson)\n }\n catalog_features.append(catalog_entry)\n\n if not os.path.exists(outdir) or not os.path.exists(os.path.join(outdir, \"units.json\")):\n logging.info(\"Generated exploded GeoJSON to \" + outdir)\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n # .json instead of .geojson, incase there is a unit named \"source\"\n utils.write_json(os.path.join(outdir, \"source.json\"), catalog_entry) \n units = []\n for feature in geojson['features']:\n feature_id = str(feature['properties']['id'])\n feature_id = feature_id.replace('/', '')\n feature_filename = os.path.join(outdir, feature_id + \".geojson\")\n utils.write_json(feature_filename, feature)\n units.append(feature['properties'])\n utils.write_json(os.path.join(outdir, \"units.json\"), units)\n else:\n logging.debug(\"exploded GeoJSON already exists, not generating\")\n\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Error processing file \" + path)\n failures.append(path)\n success = False\n\n catalog = {\n 'type': 'FeatureCollection',\n 'features': catalog_features\n }\n utils.write_json(os.path.join(output,'catalog.geojson'), catalog)\n\n if not success:\n logging.error(\"Failed sources: \" + \", \".join(failures))\n sys.exit(-1)", "def _parse_sources(self):\n return [{\n 'url': (\n 'https://docs.google.com/spreadsheets/d/'\n '1uzgWLWl19OUK6RhkAuqy6O6p4coTOqA22_nmKfzbakE'\n ),\n 'note': 'Google Sheet that Darryl filled out manually'\n }]", "def main(global_config, **settings):\n # http://ldh.clld.org/category/iso639_3/aap-para-arara/\n config = Configurator(settings=settings)\n config.include('clldmpg')\n config.include('clld_glottologfamily_plugin')\n config.registry.registerUtility(LanguageByFamilyMapMarker(), IMapMarker)\n\n # /feed/ -> /sources.atom\n \"\"\"\n/comments/feed/\n/feed/ -> /sources.atom\n \"\"\"\n config.add_301('/about/', lambda req: req.route_url('dataset', id='ldh'))\n config.add_301('/about/objective/', lambda req: req.route_url('about', _anchor='objectives'))\n config.add_301('/repository/', lambda req: req.route_url('about', _anchor='repository'))\n config.add_301('/archive/', lambda req: req.route_url('sources'))\n config.add_301('/category/iso639_3/', lambda req: req.route_url('languages'))\n config.add_301('/category/allgemein/', lambda req: req.route_url('languages'))\n config.add_301('/contact/', lambda req: req.route_url('contact'))\n config.add_301('/for-authors/', lambda req: req.route_url('help'))\n config.add_301('/for-authors/{path}/', lambda req: req.route_url('help'))\n config.add_301('/feed/', lambda req: req.route_url('sources_alt', ext='atom'))\n\n config.add_301(\n '/{year}/{month}/{day}/escidoc{id}/',\n lambda req: req.route_url(\n 'source',\n id='item_{0}'.format(req.matchdict['id'].split('-')[0]),\n _query=req.query_params),\n name='item')\n 
config.add_301(\n '/category/iso639_3/{id}/',\n lambda req: req.route_url(\n 'language',\n id=req.matchdict['id'].split('-')[0],\n _query=req.query_params),\n name='category')\n\n config.register_download(\n adapters.BibTeX(models.Description, 'ldh', description=\"Descriptions as BibTeX\"))\n\n return config.make_wsgi_app()", "def load_feeds(request: HttpRequest, source_name: str = None) -> JsonResponse:\n\n # We will get the feed of the first source to display\n result = {}\n if source_name is not None:\n if 'checked' in request.GET:\n checked = request.GET.get('checked')\n feeds = Feed.objects\\\n .filter(source__name=source_name)\\\n .filter(checked=checked)\\\n .order_by('-created_at')[:NUMBER_OF_FEEDS_PERPAGE]\n else:\n feeds = Feed.objects\\\n .filter(source__name=source_name)\\\n .order_by('-created_at')[:NUMBER_OF_FEEDS_PERPAGE]\n result['feeds'] = []\n for feed in feeds:\n result['feeds'].append({\n 'id': feed.id,\n 'title': feed.title,\n 'link': feed.link,\n 'content': feed.content,\n 'author': feed.author,\n 'checked': feed.checked,\n 'created_at': feed.created_at.strftime('%a %l:%m%p')\n })\n result['size'] = feeds.count()\n result['status_code'] = 200\n return JsonResponse(result)", "def Sources():\n return _sources", "def getsources(self,\n category='general',\n language=None,\n country='us',\n apiKey=None,\n version=None):\n\n if self.version != 2:\n\n request_params = {\n \"category\":category,\n \"language\": language,\n \"country\":country,\n \"apiKey\": self._api_key,\n }\n\n # retrive the api key if set; otherwise, error\n if not self._api_key:\n raise ValueError(\n 'You must use use an API key; to get a key visit https://news'\n 'api.org/. If you have an API key, set it using the '\n 'Api.SetCredentials method.')\n\n # if api key is there, set the params\n else:\n request_params = {\n \"category\": category,\n 'language': language,\n \"country\": country,\n \"apiKey\": self._api_key,\n }\n\n\n # build the url\n url = self.base_url + self.__endpoints['source']\n\n # make the request\n r = requests.get(url,params=request_params,timeout=self._timeout)\n\n\n # return the json\n return r.json()", "def main():\n for db_csv_export in current_dir.glob(\"template*.csv\"):\n data_projects = load_projects(db_csv_export)\n json_path = db_csv_export.with_suffix(\".json\")\n with open(json_path, \"w\") as fh:\n json.dump(data_projects, fh, indent=2)", "def loadFeeds(self):\n\n metrics = self.config['metrics']\n for metric in metrics:\n metricConf = self.config['metrics'][metric]\n metricConf['name'] = metric\n source = metricConf['source']['driver']\n if 'metrics' not in self.sources[source['name']]:\n self.sources[source['name']]['metrics'] = []\n\n self.sources[source['name']]['metrics'].append(metricConf)", "def main():\n\n obj_lookup = interfaces_dir / \"FrameLib-obj-jlookup.json\"\n\n worker = jParseAndBuild()\n\n refpages = [x for x in refpages_dir.rglob(\"fl.*.xml\")]\n\n for ref in refpages:\n worker.extract_from_refpage(ref)\n\n write_json(obj_lookup, worker.j_master_dict)", "def feed() -> None:\n ...", "def run(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)\n for entry in glob.glob(os.path.join(self.data_folder, self.data_expression)):\n f = open(entry)\n text = json.loads(f.read())\n f.close()\n self.create_page_objects(text)", "def run(xml_files, gold_files, json_output):\n result = harvest_multi(xml_files, gold_files)\n with open(json_output, 'w') as writer:\n writer.write(json.dumps(result, indent=4))", "def main():\n # %%\n 
CFG.profiles_yamls_path.mkdir(parents=True, exist_ok=True)\n fpaths = list( _Config.raw_profiles_path.glob('*.html') )\n print( f'{len(fpaths)} htmls found' )\n # %%\n fpath = CFG.raw_profiles_path / 'luis-mario-urrea-murillo.html'\n # %%\n fpath = CFG.raw_profiles_path / 'cristian-david-montoya-saldarriaga-09638514a.html'\n # %%\n fpaths = [ CFG.raw_profiles_path / 'ricardo-alarcon-44079b105.html' ]\n # %%\n fpaths = [ Path('/home/teo/_data/talent/linkedin_raw_profiles/israellaguan.html')]\n # %%\n dics = {}\n # %%\n\n for i, fpath in enumerate(fpaths):\n if fpath in dics:\n continue\n\n with fpath.open('rt') as f_in:\n html = f_in.read()\n\n print( f'\\n***{i+1}/{len(fpaths)} {fpath.name}:')\n dic = extract_one( html, fpath )\n dic['linkedin_url'] = f\"https://www.linkedin.com/in/{fpath.name.split('.')[0]}\"\n dic['scraped_at'] = dt.datetime.fromtimestamp( fpath.stat().st_ctime )\n # pprint(dic['work_stats'])\n dics[fpath] = dic\n\n dics_arr = list(dics.values())\n # %%\n del dics\n # %%\n\n with (CFG.profiles_yamls_path / 'all_profiles.json').open('wt') as f_out:\n json.dump( dics_arr, f_out, cls=DateTimeEncoder, indent=4 )\n # %%\n with (CFG.profiles_yamls_path / 'all_profiles.yaml').open('wt') as f_out:\n yaml.safe_dump( dics_arr, f_out )\n # %%\n df = produce_summary_table( dics_arr )\n df.to_excel( CFG.raw_profiles_path.parent / 'mined_ruby_candidates_sample.xlsx',\n index=False)\n # %%", "def scrape_main() -> None:\n\n logger.info(\"Starting scrape\")\n search_info = construct_scrape_regex_patterns(grab_scrape_info())\n links = run_scrape(\n url=search_info['url'],\n seasons_regex=search_info['seasons'],\n episodes_regex=search_info['episodes']\n )\n if links:\n logger.debug(\"Writing urls to file\")\n with open('urls.txt', 'w') as f:\n for link in links:\n f.write(link + '\\n')\n else:\n logger.warning(\"No links available\")", "def main():\n # parse for provided arguments\n parser = argparse.ArgumentParser()\n\n # main is expecting only one arg - that being the test mode flag\n # test mode flag will be used to optionally commit the db updates\n # (i.e., if we are in test mode don't commit, do a rollback)\n parser.add_argument('keyword', type=str, help='Keyword')\n parser.add_argument('limit', type=int, help='List limit (1-1000)')\n args = parser.parse_args()\n\n # Check to make sure limit it within range\n if args.limit < 1 or args.limit > 1000:\n print 'ERROR: limit [%s] is not in the range 1-1000' % args.limit\n sys.exit(1)\n\n # Retrieve all the repositories from GitHub with the keyword, ordered\n # by descending number of forks\n data = _retrieve_data(args.keyword, args.limit)\n\n # pair down repository dict fields\n for item in data['items']:\n _filter_dict(item, DATA_KEYS)\n _filter_dict(item['owner'], OWNER_KEYS)\n\n # create final dict\n json_dict = {'keyword': args.keyword,\n 'repository_limit': args.limit,\n 'total_repositories': data['total_count'],\n 'repo_list': data['items']}\n\n print json.dumps(json_dict, indent=4)", "def main():\n\n # parses arguments\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-s', action='store', dest='start_index', type=int,\n help='The starting index for events. Default is 0')\n\n parser.add_argument('-e', action='store', dest='end_index', type=int,\n help='The starting index for events. 
Default is 5,000')\n\n results = parser.parse_args()\n\n start_index = results.start_index or 0\n\n end_index = results.end_index or 5000\n\n scraper = Scraper()\n\n # these are the event column titles from the sample import csv given by localist\n event_column_titles = [\n 'Title','Description','Date From','Date To','Recurrence','Start Time','End Time',\n 'Location','Address','City','State','Event Website','Room','Keywords','Tags',\n 'Photo URL','Ticket URL','Cost','Hashtag','Facebook URL','Group','Department',\n 'Allow User Activity','Allow User Attendance','Visibility','Featured Tabs',\n 'Sponsored','Venue Page Only','Exclude From Trending','Event Types','Invited Audience', 'Original URL',\n 'Location Details'\n ]\n\n out_stream = open('event_import.csv', 'w')\n\n writer = Writer(event_column_titles, out_stream)\n\n writer.write_headers()\n\n # iterates through the specified event numbers and scrapes each one and writes\n # it to the output file\n for i in range(start_index, end_index + 1):\n current_url = 'http://test-ucscevents.pantheonsite.io/event/' + str(i)\n print(\"processing url: \" + current_url)\n r = requests.get(current_url)\n if r.status_code != requests.codes.ok:\n print(' 404')\n else:\n soup = get_soup_from_url(current_url)\n events = scraper.scrape_event(soup)\n for event in events:\n event['Original URL'] = current_url\n\n writer.write_object(event) # event written to output file here\n\n out_stream.close()", "def run(self):\n\t\tfor source in self.sources:\n\t\t\tstringutil.print_color(Fore.GREEN, 'Downloading from Source: %s' % source.get_alias())\n\t\t\tfor r in source.get_elements():\n\t\t\t\tr.set_source(source)\n\t\t\t\tself._queue.put(r)\n\n\t\t\t\t# Extra tracking stuff below:\n\t\t\t\twith self._c_lock:\n\t\t\t\t\tself._total_count+= 1\n\t\t\t\tif self._testing_cache is not None:\n\t\t\t\t\tself._testing_cache.append(r)\n\t\t#print(\"Element loading complete.\\n\")\n\t\tself._running = False", "def main():\n widget = ParseGrypeJSON()\n logging.debug(f'argv {\",\".join(sys.argv)}')\n\n if len(sys.argv) > 1:\n widget.filename(sys.argv[1])\n\n sys.exit(widget.report())", "def run():\n pgconn = util.get_dbconn()\n cursor = pgconn.cursor()\n cursor.execute(\n \"\"\"\n SELECT feedtype from ldm_feedtypes ORDER by feedtype\n \"\"\"\n )\n res = dict(feedtypes=[])\n for row in cursor:\n res[\"feedtypes\"].append(row[0])\n\n return json.dumps(res)", "def fetch_data():\n log = logging.getLogger(__name__)\n log.info('Checking data files...')\n if not os.path.isfile('CGN.txt'):\n params_cgn = {\n 'institute.code': ['NLD037'],\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n cgn = GenesysParser(params_cgn)\n cgn.fetch2json('CGN.txt')\n log.info('CGN data has been saved.')\n else:\n log.info('CGN data file already exists.')\n\n if not os.path.isfile('USDA.txt'):\n params_usda = {\n 'institute.code': usda_all,\n # 'crops': ['tomato'],\n 'taxonomy.genus': ['Solanum', 'Lycopersicon'],\n 'taxonomy.species': species\n }\n usda = GenesysParser(params_usda)\n usda.fetch2json('USDA.txt')\n log.info('USDA data has been saved.')\n else:\n log.info('USDA data file already exists.')", "def main():\n print get_latest_data()", "def dashboard_article_sources():\n sources = dict()\n past_30 = offset_time_past(30, str=True)\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n results = articles.find({'collected': {'$gt': past_30}}, {'_id': 0})\n for result in results:\n sources[result['source']] = sources.get(result['source'], 
0) + 1\n sources = sorted(sources.items(), key=operator.itemgetter(1), reverse=True)\n data = sources[:10]\n return jsonify(data)", "def watch_sources():\n sources_info = '\\n' + get_sources_info()\n first_line = True\n for source, line in get_new_lines():\n if first_line:\n first_line = False\n notify(sources_info, 'Started Watching Sources')\n\n # parse the fetched lines, if not alert-worthy, alert_type will be None\n alert_type, title, content = parse_line(line, source)\n\n if alert_type == 'notify':\n notify(content, title)\n\n if alert_type == 'alert':\n alert(content, title)", "def collect():\n datadir = 'data'\n if 'OUTPUT_DATA_DIR' in os.environ:\n datadir = os.environ['OUTPUT_DATA_DIR']\n\n scraper_dir = os.path.join(os.getcwd(), 'scrapers')\n scrapers = get_scraper_list(scraper_dir)\n now = datetime.now()\n total_deals = []\n for scr_instance in scrapers:\n deals = scr_instance.get_deals()\n\n # Map a timestamp on each deal\n for item in deals:\n item.update({'timestamp': now.strftime('%Y-%m-%d')})\n\n print(\"\\n Collected {0} deals for {1} \\n\\n\".format(len(deals), scr))\n\n total_deals += deals\n\n filename = '{0}_resultset.json'.format(now.strftime('%Y%m%d_%H%I%S'))\n\n fh = open(os.path.join(datadir, filename), 'w+')\n fh.write(json.dumps(total_deals))\n fh.close()", "def fetch_all_news_codes():\n response = requests.get(SOURCE_URL)\n json = response.json()\n global news_codes\n for source in json['sources']:\n news_codes.append(source['id'])", "def fetchJson(url):", "def main(**args):\n args = Arguments(args) or get_args()\n last_id = args.last_id\n prev_time = time.time()\n\n since_date = datetime.datetime.strptime(args.since_date or datetime.datetime.today().strftime(\"%Y-%m-%d\"), \"%Y-%m-%d\")\n until_date = datetime.datetime.strptime(args.until_date, \"%Y-%m-%d\") if args.until_date else since_date\n\n captured = 0\n finished = False\n results = []\n urls = set()\n\n if args.kafka_brokers:\n producer = KafkaProducer(bootstrap_servers=args.kafka_brokers,\n api_version=(0, 10, 0),\n value_serializer=lambda v:\n json.dumps(v).encode(\"utf-8\"))\n\n if args.output_json:\n j = open(args.output_json, \"w\")\n\n mediacloud = MediaCloud(args.mediacloud_key)\n\n while not finished and since_date <= until_date:\n solr_filter = mediacloud.publish_date_query(until_date, until_date+datetime.timedelta(days=1))\n log.info(f\"Collecting {solr_filter}...\")\n\n while not finished:\n previous_results = results\n\n try:\n results = mediacloud.storyList(solr_query=args.query,\n solr_filter=solr_filter,\n last_processed_stories_id=last_id,\n rows=args.rows)\n\n if not results\\\n or previous_results == results:\n finished = True\n break\n\n for story in results:\n last_id = story[\"processed_stories_id\"]\n\n if args.lang and (story[\"language\"] != args.lang):\n continue\n\n if story[\"url\"]:\n urls_length = len(urls)\n urls.add(story[\"url\"])\n\n if urls_length != len(urls):\n if args.kafka_brokers:\n producer.send(args.topic,\n process_entry(story),\n key=sha1(story[\"url\"].encode(\"utf-8\")).hexdigest().encode(\"utf-8\"))\n if args.output_json:\n json.dump(story, j)\n j.write(\"\\n\")\n captured += 1\n else:\n log.debug(\"Skipping duplicate document: %s\" % story[\"url\"])\n else:\n log.error(f\"Found invalid document: {story}\")\n\n dt = time.time() - prev_time\n if dt > 10:\n log.info(f\"Captured {captured} article(s).\")\n prev_time = time.time()\n\n if args.limit == captured:\n finished = True\n break\n\n except Exception as e:\n log.warning(f\"{e}\")\n break\n\n until_date = 
until_date - datetime.timedelta(days=1)\n\n log.info(f\"Total of {captured} captured article(s).\")", "def topheadlines():\n newsSource = click.prompt(\"Please enter your choice from listsources\")\n \n main_url = \"https://newsapi.org/v2/top-headlines?apiKey=f45fa2c71932483f832f0cc745af0325&sources=\"+newsSource\n\n\t# fetching data in json format \n open_headline = requests.get(main_url).json() \n\n\t# getting all headlines in a string articles \n headline = open_headline[\"articles\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n output = [] \n\t\n for h in headline: \n click.echo('\\n')\n click.secho(click.style('TITLE: ' + h['title'], fg='red'))\n click.secho(click.wrap_text(h['description']))\n click.secho(click.style('DOMAIN: ' + h['url'], fg='blue'))\n \n \t\n for i in output[:11]:\n print(i)", "def run(self):\n if self.parsed_args.fetch_cache:\n issues = self.backend.fetch_from_cache()\n else:\n issues = self.backend.fetch(from_date=self.from_date)\n\n try:\n for issue in issues:\n obj = json.dumps(issue, indent=4, sort_keys=True)\n # self.outfile.write(issue['url']+\"\\n\")\n self.outfile.write(obj)\n self.outfile.write('\\n')\n except requests.exceptions.HTTPError as e:\n raise requests.exceptions.HTTPError(str(e.response.json()))\n except IOError as e:\n raise RuntimeError(str(e))\n except Exception as e:\n if self.backend.cache:\n self.backend.cache.recover()\n raise RuntimeError(str(e))", "def main():\n\n print(\"Retreiving BBC playlists for dates between {} and {}\".\n format(start_date.strftime(\"%Y-%m-%d\"), end_date.strftime(\"%Y-%m-%d\")))\n\n # Get daily schedule URLs within date range\n radio6_schedule_list = helpers.bbc_daily_schedule_urls(bbc_radio6_url, helpers.get_date_list(start_date, end_date))\n\n # Get all show URLS\n all_program_urls = []\n for url in radio6_schedule_list:\n all_program_urls += helpers.bbc_program_urls(url)\n\n # Get all track playlists from program URLs\n track_lists = []\n for url in all_program_urls:\n program_playlist = helpers.get_playlist(url)\n track_lists.append(program_playlist)\n\n print(track_lists)\n return track_lists", "def _extract_data_from_feed(self):\n for eco in self.snyk_data:\n if eco == \"java\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Maven.\")\n self._add_default_obj_for_eco(\"maven\")\n self._parse_data(self.snyk_data[eco], \"maven\")\n elif eco == \"js\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Npm.\")\n self._add_default_obj_for_eco(\"npm\")\n self._parse_data(self.snyk_data[eco], \"npm\")\n elif eco == \"python\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Pypi.\")\n self._add_default_obj_for_eco(\"pypi\")\n self._parse_data(self.snyk_data[eco], \"pypi\")\n elif eco == \"golang\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Golang.\")\n self._add_default_obj_for_eco(\"golang\")\n self._parse_golang_data(self.snyk_data[eco], \"golang\")\n else:\n logger.info(\"Ignoring the ecosystem {} from the feed\".format(eco))", "def run(self):\n\t\tself._keep_running = True\n\t\tif self._testing_cache is not None:\n\t\t\tself._testing_cache = []\n\n\t\tfor source in self.sources:\n\t\t\ttry:\n\t\t\t\tstringutil.print_color(Fore.GREEN, 'Downloading from Source: %s' % source.get_alias())\n\t\t\t\tfor r in source.get_elements():\n\t\t\t\t\tif not self._keep_running:\n\t\t\t\t\t\treturn\n\t\t\t\t\tr.set_source(source)\n\t\t\t\t\twhile self._keep_running:\n\t\t\t\t\t\ttry: # Keep trying to add this element to 
the queue, with a timeout to catch any stop triggers.\n\t\t\t\t\t\t\tself._queue.put(r, timeout=1)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept queue.Full:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t# Extra tracking stuff below:\n\t\t\t\t\twith self._c_lock:\n\t\t\t\t\t\tself._total_count += 1\n\t\t\t\t\tif self._testing_cache is not None:\n\t\t\t\t\t\tself._testing_cache.append(r)\n\t\t\texcept ConnectionError as ce:\n\t\t\t\tprint(str(ce).upper())\n\t\tself._keep_running = False", "def extract_data():\n args = arguments()\n\n if args.list is not None:\n songs = utility.get_songs(args.list)\n logger.debug(str(songs))\n if len(songs) != 0:\n logger.info(\"Downloading songs in {}\".format(args.list))\n for song_name in songs:\n logger.debug(song_name)\n args.SONG_NAME = [song_name]\n main(args)\n else:\n logger.info(\"{}: is empty\".format(args.list))\n elif args.SONG_NAME and yt.is_playlist(args.SONG_NAME[0]):\n logger.info(\"Youtube playlist passed...extracting!\")\n songs, playlist_name = yt.get_playlist(\n args.SONG_NAME[0],\n args.proxy,\n args.pl_start,\n args.pl_end,\n args.pl_items\n )\n\n # Check if data is actually returned\n if songs is None:\n logger.error(\"Couldn't extract playlist data!\")\n\n logger.info(\"Playlist: {}\".format(playlist_name))\n logger.info(\"{} songs found\".format(len(songs)))\n\n # Iterate and work on the data.\n url_base = \"https://www.youtube.com/watch?v=\"\n for song in songs:\n args.url = url_base + song[\"url\"]\n\n # Try to pass the title as well, if it's not there\n # that will be handled by ytmdl\n try:\n args.SONG_NAME = [stringutils.remove_yt_words(song[\"title\"])]\n except KeyError:\n pass\n\n main(args)\n else:\n main(args)", "def run(self, sources, task=\"discover\", once=False):\n condition = True\n while condition:\n # list of urls\n self.logger.info(\"Reading source urls from '%s'\" % sources)\n self.__read_sources_doc__(sources)\n # reset url --> destination map. 
New mappings may be configured\n DestinationMap.__set_map_filename__(Config().\n prop(Config.key_location_mapper_destination_file, \"conf/desmap.txt\"))\n # drop to force fresh read from file\n DestinationMap().__drop__()\n # Set the root of the destination folder if configured\n DestinationMap().set_root_folder(Config().prop(Config.key_destination_root))\n # do all the urls\n self.__do_task__(task)\n # report\n self.__do_report__(task)\n # to continue or not to continue\n condition = not (once or self.__stop__())\n if condition:\n pause = Config().int_prop(Config.key_sync_pause)\n self.logger.info(\"Going to sleep for %d seconds.\" % pause)\n self.logger.debug(\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\")\n time.sleep(pause)\n # repeat after sleep\n condition = not (once or self.__stop__())", "def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))", "def scrape_articles(dir_name, source_file):\n news_list = get_file_data(os.path.join(BASE_DIR, source_file))\n redis_con = get_redis_connection()\n coroutines = []\n for link in news_list:\n paper = newspaper.build(link.rstrip('\\n'), memoize_articles=False)\n paper.download()\n for article in paper.articles:\n coroutines.append(process_and_ingest(redis_con, article))\n loop = asyncio.get_event_loop()\n tasks, _ = loop.run_until_complete(asyncio.wait(coroutines))\n for task in tasks:\n save_json_file(task.result(), dir_name)", "def main():\n show_banner()\n args = parse_args(sys.argv[1:])\n urls = get_urls(args.inputfiles)\n if args.only_urls:\n print(\"URL\")\n else:\n print('{:70.70} {}'.format(\"URL\", \"Response\"))\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(download(urls,\n args.concurrency,\n args.only_success,\n args.outputfile,\n args.only_urls))", "def main():\n\n # Set up argument parser.\n parser = argparse.ArgumentParser(\n description='Removes duplicate key-value pairs from JSON files.')\n parser.add_argument('--suffix', default='',\n help='optional suffix for output files; '\n 'if empty, files will be changed in place')\n parser.add_argument('files', nargs='+', help='input files')\n args = parser.parse_args()\n\n # Iterate over files.\n for filename in args.files:\n # Read in json using Python libraries. 
This eliminates duplicates.\n print('Processing ' + filename + '...')\n try:\n with codecs.open(filename, 'r', 'utf-8') as infile:\n j = json.load(infile)\n except ValueError as e:\n print('Error reading ' + filename)\n raise InputError(filename, str(e))\n\n # Built up output strings as an array to make output of delimiters easier.\n output = []\n for key in j:\n if key != '@metadata':\n output.append('\\t\"' + key + '\": \"' +\n j[key].replace('\\n', '\\\\n') + '\"')\n\n # Output results.\n with codecs.open(filename + args.suffix, 'w', 'utf-8') as outfile:\n outfile.write('{\\n')\n outfile.write(',\\n'.join(output))\n outfile.write('\\n}\\n')", "def main(argv=sys.argv[1:]): # pylint: disable=dangerous-default-value\n parser = argparse.ArgumentParser()\n image = argparse.ArgumentParser()\n parser.add_argument(\"action\", choices=[\"image\", \"sources\"])\n\n image.add_argument('-d', '--download',\n help='Download the result to a file.',\n default=False, action=\"store_true\")\n image.add_argument('-f', '--file',\n help=\"Filename to download to.\",\n default=lambda x: x.split(\"/\")[-1])\n image.add_argument('source', help=\"Image source to use.\")\n image.add_argument('query', help=\"Tags to use during search.\",\n default='', nargs=\"*\")\n\n args = parser.parse_args(argv)\n\n if args.action == \"sources\":\n sources = \"\\n\".join(\"\\n\".join(v for v in source) for source in\n nsfw_dl.SOURCES.values())\n print(sources)\n\n else:\n args = image.parse_args(argv[1:])\n download(args.source, args.query, args.file, args.download)", "def main():\n # There are no args, but parse them just so help works\n args = docopt(__doc__)\n print(process_files_json(), end=\"\")\n return None", "def show_sources_category(category):\n if category not in NEWS_CATEGORIES:\n print(\"Invalid category\")\n sys.exit(1)\n\n url = \"?category={category_type}\"\n response = requests.get((SOURCE_URL+url).format(category_type=category))\n json = response.json()\n for source in json['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\", source['id'], source['name']))", "def loadDrivers(self):\n\n self.sources = {}\n for source in self.config['sources']:\n sourceConf = self.config['sources'][source]\n baseClass = sourceConf['baseClass']\n self.logger.debug(\"Loading: \" + source +\n \" instance of: \" + baseClass)\n sourceArgs = sourceConf['source-config']\n self.sources[source] = {}\n try:\n print(baseClass)\n tempModule = import_module('sources.' + baseClass)\n \"\"\"tempModule = __import__('sources.' + baseClass,\n globals(), locals(), [baseClass], -1)\n \"\"\"\n self.sources[source]['source'] = getattr(tempModule, str(\n baseClass))(sourceArgs)\n except Exception as e:\n self.logger.error(\"exception: \" + str(e))\n return None", "def run(self):\n # read inputs\n indent = int(self.tcex.playbook.read(self.args.indent))\n byte_json_data = self.tcex.playbook.read(self.args.json_data)\n\n json_string = byte_json_data.decode()\n json_data = json.loads(json_string)\n\n try:\n # 1. each json_data['alerts'] is an identifier\n for alerts in json_data['alerts']:\n # 2. 
for each, 'items', add key:identifier name,\n identifier_name = alerts.get(\"name\") \n for item in alerts.items():\n for item in alerts['items']:\n item['source_identifier'] = identifier_name\n self.all_items.append({'key': item['id'], 'value': item})\n\n except Exception:\n self.tcex.exit(1, 'Failed parsing JSON data.')\n\n # set the App exit message\n self.exit_message = 'Firework Alert Ingested.'", "def process_cmd():\n web_scraper = SainsburyWebscraper()\n logger.info(\"Sainsbury web scraper initialized and loaded data from SainsburyWebscraper\")\n\n json_data = web_scraper.get_product_data()\n logger.info(\"Found %s products with the following data:\" % len(json_data[\"results\"]))\n print json.dumps(json_data, indent=4, sort_keys=True)", "def query_initial_sources(self):\n self.search_thread_pool.waitForDone(0)\n # self.init_vector_layers()\n username, password, api_key, max_items_to_return = SettingsOps.get_settings()\n errors = []\n SettingsOps.validate_stored_info(username, password, api_key, max_items_to_return, errors)\n if len(errors) == 0:\n source_runnable = SourceRunnable(username, password, api_key, DEFAULT_ORDER_PARAMS)\n source_runnable.source_object.task_complete.connect(self.on_new_source)\n self.init_progress_bar()\n self.search_thread_pool.start(source_runnable)", "def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.gocomics.com/features', session, res)\n handle_url('http://www.gocomics.com/explore/editorial_list', session, res)\n handle_url('http://www.gocomics.com/explore/sherpa_list', session, res)\n save_result(res, json_file)", "def setup():\n # derive log file name from script name\n log_filename = '{}{:s}'.format(pathlib.Path(__file__).resolve().stem, '.log')\n\n # read command line arguments (https://docs.python.org/3/howto/argparse.html)\n argparser = argparse.ArgumentParser(description='Collects official SARS-CoV-2 infection statistics published by the city of Dresden.')\n arg_group_inputs = argparser.add_argument_group('input options', 'by default, the data is obtained online from the city\\'s official source, but other import options are also available')\n arg_group_timestamps = argparser.add_mutually_exclusive_group()\n arg_group_outputs = argparser.add_argument_group('output options', 'new data is saved in InfluxDB by default; this and other behaviour concerning data writing can be adjusted with these output options')\n arg_group_outputs.add_argument('-a', '--archive-json', help='archive JSON file each time new data is found or force-collected', action='store_true')\n argparser.add_argument('-c', '--force-collect', help='store JSON data, regardless of whether new data points have been found or not', action='store_true')\n arg_group_timestamps.add_argument('-d', '--date', help='set publishing date manually for the new data set, e. g. 
\\'2020-10-18T09:52:41Z\\'')\n arg_group_inputs.add_argument('-f', '--file', help='load JSON data from a local file instead from server; if no publishing date is passed with the \\'--date\\' or \\'--auto-date\\' option, an attempt is made to read the date from the filename', nargs='?', type=argparse.FileType('r'), const='query.json') # 'const' is used, if '--file' is passed without an argument; default=sys.stdin; https://stackoverflow.com/a/15301183/7192373\n arg_group_outputs.add_argument('-l', '--log', help='save log in file \\'{:s}\\''.format(log_filename), action='store_true')\n arg_group_outputs.add_argument('-n', '--no-cache', help='suppress the saving of a JSON cache file (helpful if you do not want to mess with an active cron job looking for changes)', action='store_true')\n arg_group_outputs.add_argument('-o', '--output-dir', help='set a user defined directory where data (cache, logs and JSONs) are stored; default: directory of this Python script', default=pathlib.Path(pathlib.Path(__file__).resolve().parent, OUTPUT_FOLDER)) # use absolute path of this Python folder as default directory\n arg_group_outputs.add_argument('-s', '--skip-influxdb', help='check for and write new JSON data only, do not write to InfluxDB', action='store_true')\n arg_group_timestamps.add_argument('-t', '--auto-date', help='do not try to to parse the publishing date from the filename, instead write current date (UTC) to database', action='store_true')\n arg_group_inputs.add_argument('-u', '--url', help='URL to be used to check for JSON updates; default: \\'arcgis\\'', choices=['arcgis', 'github'], default='arcgis', type=str.lower)\n argparser.add_argument('-v', '--verbose', help='print debug messages', action='store_true')\n\n global args\n args = argparser.parse_args()\n\n if args.verbose:\n log_level = logging.DEBUG\n else:\n log_level = logging.INFO\n\n # setup logging\n global logger\n logger = logging.getLogger()\n logger.setLevel(log_level)\n\n # log format\n logging_format = '[%(asctime)s] %(levelname)s %(message)s' # %(name)s.%(funcName)s %(pathname)s:\n log_formatter = logging.Formatter(logging_format) #, datefmt=\"%Y-%m-%dT%H:%M:%S\")\n\n # log to console\n handler = logging.StreamHandler()\n handler.setFormatter(log_formatter)\n logger.addHandler(handler)\n\n # get path for output\n global output_dir\n try:\n output_dir = pathlib.Path(args.output_dir)\n except TypeError:\n logger.error(f'Could not resolve output directory \\'{args.output_dir}\\'.')\n sys.exit()\n\n # log to file\n if args.log:\n handler = logging.handlers.RotatingFileHandler(pathlib.Path(output_dir, log_filename), maxBytes=2**20, backupCount=5) # https://stackoverflow.com/a/13733777/7192373; https://docs.python.org/3/library/logging.handlers.html#logging.handlers.RotatingFileHandler\n handler.setFormatter(log_formatter)\n logger.addHandler(handler)\n\n # setup DB connection\n if not args.skip_influxdb:\n global db_client\n db_client = InfluxDBClient(host='localhost', port=8086) # https://www.influxdata.com/blog/getting-started-python-influxdb/\n db_client.create_database(INFLUXDB_DATABASE)\n db_client.switch_database(INFLUXDB_DATABASE)", "def main():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"api_base_url\", type=str,\n help=\"base url for all tests\")\n parser.add_argument(\"test_file_name\", type=str,\n help=\"name of file containing JSON array of tests\")\n parser.add_argument(\"-f\", \"--format\", default=\"json\", type=str,\n help=\"output format - must be either json or text\")\n\n args = 
parser.parse_args()\n\n try:\n\n run_tests_from_file(args.api_base_url, args.test_file_name, \n args.format)\n\n except KeyError as e:\n print(\"Required key '%s' not found. Check tests file.\" % str(e.args[0]))\n exit(1)\n\n except FileNotFoundError:\n print(\"Cannot open file '%s'. File not found.\" % args.test_file_name)\n exit(1)\n\n except ValueError:\n print(\"Cannot decode JSON from file '%s'.\" % args.test_file_name)\n exit(1)", "def get(self):\n CACHE_KEY = 'sources'\n if not memcache.get(CACHE_KEY):\n logging.info('Populating cache.')\n feeds = Feed.all().order('name')\n feed_list = []\n for feed in feeds:\n feed_list.append(feed.ToDict())\n memcache.add(CACHE_KEY, simplejson.dumps(feed_list), 600)\n logging.info('Using cache.')\n logging.info(memcache.get(CACHE_KEY))\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(memcache.get(CACHE_KEY))", "def main():\n\n # configure logging #\n logging.basicConfig(level=logging.INFO, format=\"%(asctime)s - %(name)s - [%(levelname)s] %(message)s\")\n coloredlogs.install(level=\"INFO\", logger=logging.getLogger())\n\n parser = argparse.ArgumentParser(description=\"Any.Run API on the CLI\")\n parser.add_argument(\"-d\", \"--debug\", action=\"store_true\", help=\"Turn on debug logging.\")\n parser.add_argument(\"-sh\", \"--show-history\", action=\"store_true\", help=\"Show analysis history.\")\n parser.add_argument(\"-e\", \"--environments\", action=\"store_true\", help=\"Get AnyRun environments.\")\n parser.add_argument(\"-u\", \"--user-limits\", action=\"store_true\", help=\"Get AnyRun user details.\")\n parser.add_argument(\n \"--raw-get\", action=\"store\", help=\"Supply a URL to attempt to stream any content from (pipe to file).\"\n )\n\n subparsers = parser.add_subparsers(dest=\"command\")\n get_parser = subparsers.add_parser(\"get\", help=\"Get analysis report data by task ID.\")\n get_parser.add_argument(\"task\", action=\"store\", help=\"An analysis task id.\")\n get_parser.add_argument(\"-p\", \"--pcap\", action=\"store_true\", help=\"Download any pcap available for given report.\")\n get_parser.add_argument(\"-i\", \"--ioc\", action=\"store_true\", help=\"Download IOCs for report\")\n get_parser.add_argument(\"-s\", \"--summary\", action=\"store_true\", help=\"Get report summary\")\n get_parser.add_argument(\"--json\", action=\"store_true\", help=\"If json results, return json.\")\n\n submit_parser = subparsers.add_parser(\"submit\", help=\"Submit file for analysis.\")\n submit_parser.add_argument(\"file\", action=\"store\", help=\"Path to file to submit.\")\n\n args = parser.parse_args()\n\n if args.debug:\n coloredlogs.install(level=\"DEBUG\", logger=logging.getLogger())\n\n config = configparser.ConfigParser()\n config.read(CONFIG_PATHS)\n\n host = config[\"default\"].get(\"host\")\n apikey = config[\"default\"].get(\"api_key\")\n\n anyrun = AnyRunClient(apikey, host=host)\n\n try:\n if args.show_history:\n LOGGER.info(\"getting history\")\n print(anyrun.get_history())\n return True\n elif args.environments:\n LOGGER.info(\"Getting environments.\")\n print(anyrun.get_environment())\n return True\n elif args.user_limits:\n LOGGER.info(\"getting user limits.\")\n print(anyrun.get_user())\n return True\n elif args.raw_get:\n LOGGER.info(f\"Attempting to stream content from {args.raw_get}\")\n r = anyrun.get(args.raw_get)\n with open(\"anyrun.raw_get.output\", \"wb\") as fp:\n fp.write(r.content)\n if os.path.exists(\"anyrun.raw_get.output\"):\n print(f\"wrote: anyrun.raw_get.output\")\n return\n 
elif args.command == \"get\":\n if args.pcap:\n LOGGER.info(f\"Downloading pcap for {args.task}\")\n r = anyrun.download_report_pcap(args.task)\n return r\n elif args.ioc:\n LOGGER.info(f\"Downloading IOCs for {args.task}\")\n write_path = f\"{args.task}.anyrun.ioc.json\"\n if args.json:\n write_path = False\n r = anyrun.get_report_iocs(args.task, write_path=write_path)\n return r\n elif args.summary:\n LOGGER.info(f\"Downloading report summary for {args.task}\")\n write_path = f\"{args.task}.anyrun.summary.json\"\n if args.json:\n write_path = False\n r = anyrun.get_report_summary(args.task, write_path=write_path)\n return r\n else:\n # by default, download the full report\n LOGGER.info(f\"Getting analysis report for {args.task}\")\n write_path = f\"{args.task}.anyrun.json\"\n if args.json:\n write_path = False\n r = anyrun.get_report(args.task, write_path=write_path)\n return r\n elif args.command == \"submit\":\n raise NotImplementedError(\"Submit is not yet implemented. Use the ANY.RUN GUI.\")\n except Exception as e:\n LOGGER.critical(e)\n\n return True", "def start_requests(self):\n authors_pandas = conf.read_from_data('authors.json')\n author_link_list = list(\n map(lambda obj: (obj['keyUrl'], conf.gd_base_url + obj['article_url'], obj['article_url']),\n authors_pandas))\n for link in author_link_list:\n yield Request(url=link[1])", "def main():\n test_network_connection()\n parser()", "def feed_records(self):\n if not self.stats_file:\n return\n\n with open(self.stats_file) as fh:\n reader = reverse_file(fh)\n for line in reader:\n if line is None:\n return\n if not line:\n continue\n\n try:\n js = json.loads(line)\n except Exception as e:\n continue\n\n yield js", "def scanFeedList(self): \r\n data = self.feed_handler.listScanFeeds()\r\n data = data[:MAX_FEEDS_SCAN]\r\n for idx, feed in enumerate(data):\r\n print \"feeds ... 
/ [%s/%s] (%s docs:%s passed)\" % (idx, len(data),self.feed_item_ctr, self.feed_passed)\r\n try:\r\n baseURL = feed.mainUrl\r\n self.processData(baseURL) \r\n self.createFeedItems()\r\n except Exception, ex:\r\n print(\"ERR: failed to process data and create feed item=%s\" % ex)\r\n print \"done\"", "def build():\n for root, dirs, files in os.walk(IN_PATH):\n for filename in files:\n if filename.endswith('.csv'):\n with open(os.path.join(IN_PATH, filename), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n path = row[0].split('.')\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = row[1]\n else:\n d = d[path[i]]\n with open (os.path.join(OUT_PATH, filename.replace('.csv', '.json')), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open (os.path.join(WEB_PATH, filename.replace('.csv', '.js')), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))\n\n with open(os.path.join(IN_PATH, 'en_US.csv'), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n path = row[0].split('.')\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = zz_string(row[1], row[0])\n else:\n d = d[path[i]]\n with open(os.path.join(OUT_PATH, 'zz_ZZ.json'), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open(os.path.join(WEB_PATH, 'zz_ZZ.js'), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))", "def execute():\n # Write JSON files for each country. 
Ex: US.json lists ALL recently active users from the USA\n for country_code in config['countries']:\n json_data = get_users(country_code=country_code)\n write_json(json_data=json_data, country_code=country_code)", "def main():\n download_insert_title_basics()\n download_insert_title_principals()\n download_insert_name_basics()\n download_insert_title_ratings()\n scrap_keywords()\n create_and_insert_soup()\n return", "def main(argv):\n\n output_filename = ''\n input_filename = ''\n langCode = 'en'\n language = False\n\n # add support for default (en) language\n language = gettext.translation(\n 'webperf-core', localedir='locales', languages=[langCode])\n language.install()\n _ = language.gettext\n\n try:\n opts, args = getopt.getopt(\n argv, \"hi:o:\", [\"help\", \"input=\", \"output=\"])\n except getopt.GetoptError:\n print(main.__doc__)\n sys.exit(2)\n\n if (opts.__len__() == 0):\n print(main.__doc__)\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in ('-h', '--help'): # help\n print(main.__doc__)\n sys.exit(2)\n elif opt in (\"-i\", \"--input\"): # input file path\n input_filename = arg\n\n file_ending = \"\"\n file_long_ending = \"\"\n if (len(input_filename) > 4):\n file_ending = input_filename[-4:].lower()\n if (len(input_filename) > 7):\n file_long_ending = input_filename[-7:].lower()\n\n if file_long_ending == \".sqlite\":\n from engines.sqlite import read_sites, add_site, delete_site\n elif (file_ending == \".csv\"):\n from engines.csv import read_sites, add_site, delete_site\n elif (file_ending == \".xml\"): # https://example.com/sitemap.xml\n from engines.sitemap import read_sites, add_site, delete_site\n else:\n from engines.json import read_tests, read_sites, add_site, delete_site\n pass\n elif opt in (\"-o\", \"--output\"): # output file path\n output_filename = arg\n pass\n\n tests = read_tests(input_filename, 0, -1)\n generated_date = False\n co2s = list()\n\n for test in tests:\n if not generated_date:\n generated_date = datetime.fromisoformat(\n test[FIELD_INDEX_DATE]).strftime('%Y-%m-%d')\n\n str_data = test[FIELD_INDEX_DATA].replace('\\'', '\"')\n data = json.loads(str_data)\n print(str_data)\n co2s.append(data['co2'])\n\n if not generated_date:\n generated_date = datetime.today().strftime('%Y-%m-%d')\n\n output_content = \"# This array was last generated with carbon-rating.py on {0}\\n\".format(\n generated_date)\n output_content += \"def get_generated_date():\\n\"\n output_content += \"\\treturn '{0}'\\n\".format(\n generated_date)\n output_content += \"\\n\"\n output_content += \"def get_percentiles():\\n\"\n output_content += \"\\treturn [\\n\"\n\n co2s_sorted = sorted(co2s)\n\n intervals = list()\n\n index = 1\n while (index <= 100):\n percentile = getPercentile(co2s_sorted, index)\n intervals.append(percentile)\n position = index - 1\n if index < 100:\n if position % 10 == 0 and position != 0:\n output_content += \"\\t\\t# {0} percentile\\n\".format(position)\n\n output_content += \"\\t\\t{0},\\n\".format(percentile)\n else:\n output_content += \"\\t\\t{0}\\n\".format(percentile)\n index += 1\n\n output_content += \"\\t]\"\n\n print(output_content)\n if (len(output_filename) > 0):\n write(output_filename, output_content)", "def sources(headless, num, bin_path, chrome_args):\n chrome_args = chrome_args.split(',')\n _args = []\n for arg in chrome_args:\n if len(arg) > 0:\n if not arg.startswith('--'):\n arg = '--{}'.format(arg)\n _args.append(arg)\n chrome_args = _args\n client = proxytools.Client()\n urls = client.get_source_urls(headless=headless, num=num, 
bin_path=bin_path, chrome_args=chrome_args)\n print(json.dumps(urls, indent=4))", "def get_feed():\n return jsonify(dict({\n \"result\": mongo.get_hpfeed(),\n \"code\": 200\n }))", "def run(self):\n\n for url in self.urls:\n try:\n # Use requests to retrieve web page data\n print(url)\n response = session.get(url, ) # allow_redirects=True)\n\n if response.status_code != 200:\n print('Failed to retrieve page, URL: {0}, error: {1}\\n'.format(url, response.status_code))\n return\n\n # Get web page data from HTML response\n content = get_json_data(response.text)\n\n # Compile data into dictionary to be used for reporting\n summary_data = generate_report(content)\n\n # Generate/print report\n print_report(summary_data)\n\n except Exception as error:\n print('Scraper failed to run for URL {0}, error: {1}, {2}\\n'.format(\n url, type(error).__name__, error\n ))\n\n # time.sleep(1) # for load concerns", "def main():\n\t\tn = 0 \n\t\tfor page in range(pages):\n\t\t\t\tpageNumber = str(page + 1)\n\t\t\t\tprint \"Processing page number \" + pageNumber\n\t\t\t\tpageUrl = 'https://api.github.com/users/' + USER + '/gists?page=' + pageNumber + '&per_page=' + str(int(perpage))\n\t\t\t\tu = urlopen (pageUrl)\n\t\t\t\tgists = json.load(u)\n\t\t\t\t\t\t \n\t\t\t\tfor gist in gists:\n\t\t\t\t\t\tn += 1\n\t\t\t\t\t\tprint \"==== %d ====\" % n\n\t\t\t\t\t\t# print gist.keys()\n\t\t\t\t\t\tgistd = gist['id']\n\t\t\t\t\t\tgisturl = gist['html_url']\n\t\t\t\t\t\tgistdesc = gist['description'] or gistd\n\t\t\t\t\t\tgistfiles = gist['files']\n\t\t\t\t\t\tprint \"gistd: \", gistd\n\t\t\t\t\t\tprint \"gisturl: \", gisturl\n\t\t\t\t\t\tprint \"gistdesc: \", gistdesc\n\t\t\t\t\t\tprint \"gistfiles: \", len(gistfiles)\n\t\t\t\t\t\tfor f in gistfiles:\n\t\t\t\t\t\t\t\tfileurl = gistfiles[f]['raw_url']\n\t\t\t\t\t\t\t\t_filetype = gistfiles[f]['language']\n\t\t\t\t\t\t\t\tif _filetype in ALLOWED_FILE_TYPES:\n\t\t\t\t\t\t\t\t\t\tfiletype = _filetype\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tfiletype = \"None\"\n\t\t\t\t\t\t\t\tprint \"fileurl: \", fileurl \n\t\t\t\t\t\t\t\tprint \"filetype: \", filetype, \"(found='%s')\" % _filetype \n\t\t\t\t\t \n\t\t\t\t\t\t\t\tif TESTING:\n\t\t\t\t\t\t\t\t\t\t# testing\n\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, encoding)\n\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t# bodytext = ucontent\n\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\t\t\treq = urlopen(fileurl)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = req.read()\n\t\t\t\t\t\t\t\t\t\t\t\tencoding=req.headers['content-type'].split('charset=')[-1]\n\t\t\t\t\t\t\t\t\t\t\t\tucontent = unicode(bodytext, encoding)\n\t\t\t\t\t\t\t\t\t\t\t\tbodytext = \"# \" + gisturl + \"\\n\\n\" + ucontent\n\t\t\t\t\t\t\t\t\t\t\t\t# bodytext = ucontent\n\t\t\t\t\t\t\t\t\t\t\t\timport_dash(gistdesc, bodytext, filetype)\n\t\t\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\t\t\tprint e\n\t\t\t\t\t\t\t\t\t\t\t\tprint \"*** ERROR WRITING TO sqlite3 ***\"\n\t\t\t\t\t\t\t\t\t\t\t\tpass\n\n\t\t\t\tif TESTING:\n\t\t\t\t\t\t# so to avoid calling github API too much...\n\t\t\t\t\t\tbreak", "def get_headlines(outlet):\n if outlet == \"BBC\":\n parser = news_parser.BBC(\"https://www.bbc.co.uk\")\n elif outlet == \"DailyMail\":\n parser = 
news_parser.DailyMail(\"https://www.dailymail.co.uk\")\n elif outlet == \"Guardian\":\n parser = news_parser.Guardian(\"https://www.theguardian.com\")\n elif outlet == \"Metro\":\n parser = news_parser.Metro(\"https://www.metro.co.uk\")\n elif outlet == \"Mirror\":\n parser = news_parser.Mirror(\"https://www.mirror.co.uk/news/\")\n elif outlet == \"Reuters\":\n parser = news_parser.Reuters(\"https://uk.reuters.com\")\n elif outlet == \"Sun\":\n parser = news_parser.Sun(\"https://www.thesun.co.uk\")\n elif outlet == \"Independent\":\n parser = news_parser.Independent(\"https://www.independent.co.uk\")\n else:\n parser = news_parser.BBC(\"https://www.bbc.co.uk/news\")\n \n index = outlets.index(outlet)\n url_list = []\n while len(url_list) < 50:\n opts = {\n 'language': ['en'],\n 'source_id': [ids[index]],\n 'published_at_start':'NOW-1DAY',\n 'published_at_end':'NOW',\n 'sort_by': 'hotness',\n 'sort_direction': 'desc',\n 'cursor': '*',\n 'per_page': 100\n }\n\n try:\n api_response = api_instance.list_stories(**opts)\n for story in api_response.stories:\n url = story.links.permalink\n if url:\n url_list.append(url)\n except ApiException as e:\n print(\"Exception when calling DefaultApi->list_stories: %s\\n\" %e)\n \n opts['cursor'] = api_response.next_page_cursor\n \n url_list = url_list[:50]\n \n articles_list = []\n for url in url_list:\n raw_article = parser.get_article(url)\n if raw_article is not None:\n articles_list.append(raw_article)\n\n articles = []\n for article in articles_list:\n parsed_article = parser.parse(article)\n if parsed_article is not None:\n articles.append(parsed_article)\n \n if len(articles) > 30:\n articles = articles[:30]\n\n return articles", "def main():\r\n# Checking if argument was provided\r\n if len(sys.argv) <=1:\r\n print_usage()\r\n sys.exit(1)\r\n \r\n for arg in sys.argv:\r\n # Checking if help was called\r\n if arg == \"-h\" or arg == \"--help\":\r\n print_usage()\r\n sys.exit(1)\r\n \r\n # Checking for verbose mode \r\n if arg == \"-v\" or arg == \"--verbose\":\r\n global verbose_flag\r\n verbose_flag=1\r\n\r\n # Checking for input file\r\n if arg == \"-f\" or arg == \"--file\":\r\n global default_input_path\r\n global default_output_path\r\n default_input_path = sys.argv[2]\r\n default_output_path=default_input_path[:-4] + \"_results.txt\"\r\n\r\n #if arg == \"-u\" or arg == \"--url\":\r\n # input_url = sys.argv[2]\r\n\t \r\n if os.name == \"nt\":\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n \r\n process_from_file()", "def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)", "def GetResourcesSample():\n client = CreateClient()\n # Get a feed and print it\n feed = client.GetResources()\n PrintFeed(feed)", "def main():\n inputs = []\n files = set()\n\n args = parseArguments()\n\n # Configure the stdout logger\n logging.basicConfig(format=\"%(filename)s: %(levelname)s: %(message)s\",\n level=logging.DEBUG)\n\n try:\n # Create a list of input format objects\n for gcsv in args.gcsv.split():\n inputs.append(GoogleCSV(gcsv))\n for plain in args.plain.split():\n inputs.append(Plain(plain))\n\n # Get the URLs\n urls = mergeURLS(inputs)\n\n # 
Get the files\n for dir in args.dirs.split():\n files = files.union(formatFiles(dir, args.utc, args.ext))\n\n # Search for matches\n redirects = fuzzySearch(urls, files, args.matches, args.cutoff)\n\n except Exception as e:\n logging.error(e)\n\n if args.output == \"csv\":\n out = CSV(redirects, args.subdomain)\n elif args.output == \"rack\":\n out = Rack(redirects, args.subdomain)\n else:\n out = OutputFormat(redirects, args.subdomain)\n\n print(out)", "def main():\n # Pull variables from pf\n profileref = pfget('google_mapfeed.pf', profile)\n dbname = profileref['dbname']\n path = profileref['webbase']\n finalfile = '%s/%s' % (path, profileref['file'])\n bufferfile = '%s+' % finalfile\n max_nquakes = 600\n element_fields = ['lat', 'lon', 'depth', 'time', 'local_timestring', 'utc_timestring', 'magnitude', 'auth']\n\n if verbose:\n print \"Start: Creating main JSON file '%s' for all stations at %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n now = time.time()\n # Set time zone\n os.putenv('TZ','US/Pacific')\n time.tzset()\n if verbose:\n print \"The time zone is: %s\" % (time.tzname)[0]\n print \"The current time is: %s\" % now\n\n # Override defaults\n if override_number:\n if verbose:\n print \"Overriding default number of events (%d) with %d\" % (max_nquakes, override_number)\n nquakes = override_number\n else:\n nquakes = max_nquakes\n if override_timerange:\n if verbose:\n print \"Overiding default number of events (%d) with time range %d seconds\" % (max_nquakes, override_timerange)\n nquakes = False\n\n # Database processing\n if verbose:\n print \"Opening database\";\n print \"Number of events requested: %s\" % nquakes\n db = dbopen(dbname, 'r')\n\n '''\n Occasionally there is more than one magnitude for a single orid\n (such as provided by QED). We need the most recent magnitude for\n a given orid, so sort on orid and lddate, then group on orid,\n then get the most recent record number (greatest lddate) for each\n group. Add that to a dictionary we will use later.\n '''\n netmag_dict = {}\n db_netmag = dblookup(db, table='netmag')\n db_netmag.sort(['orid', 'lddate'])\n db_netmag_grp = dbgroup(db_netmag, 'orid')\n if verbose:\n print \"There are %s records\" % db_netmag_grp.query('dbRECORD_COUNT')\n for i in range(db_netmag_grp.query('dbRECORD_COUNT')):\n db_netmag_grp[3] = i\n orid, [dbptr, view, end_record, start_record] = db_netmag_grp.getv('orid', 'bundle')\n if verbose:\n print \"\\t- Iteration: %s: Orid: %s, Start record: %s, End record: %s\"% (i, orid, start_record, end_record)\n db_netmag[3] = end_record - 1\n if verbose:\n print \"\\t\\t- Magnitude: %s, Magtype: %s\" % (db_netmag.getv('magnitude')[0], db_netmag.getv('magtype')[0] )\n magnitude, magtype = db_netmag.getv('magnitude', 'magtype')\n netmag_dict[orid] = { 'rec':end_record, 'magnitude':magnitude, 'magtype':magtype }\n\n '''\n if verbose:\n for key in sorted(netmag_dict.iterkeys()):\n print \"%s: %s\" % (key, netmag_dict[key])\n '''\n\n '''\n Now get the event information\n '''\n db.lookup(table='origin')\n db.join('event')\n if verbose:\n print \"Number of joined records of event and origin tables: %s\" % db.query('dbRECORD_COUNT')\n if override_timerange:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - 'time >= %s'\" % (override_timerange, override_oldest)\n db.subset('time >= %d' % override_oldest)\n if verbose:\n print \"Subset on time. 
Number of records: %s\" % db.query('dbRECORD_COUNT')\n # Join views\n # db_joined = dbjoin(db, db_netmag)\n\n if verbose:\n print \"Subset orid == prefor\"\n db.subset('orid == prefor')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n print \"Subset for time != NULL\"\n db.subset('time != NULL')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n # We want the most recent first for the comparison with nquakes\n db.sort(['time'], reverse=True)\n if verbose:\n print \"Number of sorted records: %s\" % db.query('dbRECORD_COUNT')\n if nquakes:\n if db.query('dbRECORD_COUNT') > nquakes:\n db[3] = nquakes - 1\n min_time = db.getv('time')[0]\n db.subset(\"time >= %s\" % min_time)\n else:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - time > %s\" % (override_timerange, override_oldest)\n db.subset(\"time >= %s\" % override_oldest)\n # Sort in normal time - we want the most recent events plotted on top\n db.sort(('time'))\n if verbose:\n print \"Number of records without subset on time: %s\" % db.query('dbRECORD_COUNT')\n '''\n Build event dictionary\n '''\n event_dict = {'metadata':{},'events':{}}\n\n '''\n Build metadata dictionary\n '''\n if nquakes:\n event_dict['metadata']['max_nquakes'] = nquakes\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(min_time), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(min_time)\n event_dict['metadata']['type'] = 'event_limited'\n elif override_oldest:\n event_dict['metadata']['time_range'] = int(override_timerange)\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(override_oldest), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(override_oldest)\n event_dict['metadata']['type'] = 'time_limited'\n event_dict['metadata']['modification_time'] = int(time.time())\n event_dict['metadata']['modification_time_readable'] = epoch2str( int(time.time()), \"%H:%M UTC %A %B %o, %Y\" )\n\n '''\n Build event dictionary\n '''\n events = {}\n for i in range(db.query('dbRECORD_COUNT')):\n db[3] = i\n if verbose:\n epoch_time, orid = db.getv('time', 'orid')\n print \"\\tRecord number is: %s Orid is: %d Time is: %s\" % (db[3], orid, epoch2str(epoch_time, '%Y-%m-%d %H:%M:%S'))\n\n orid = db.getv('orid')[0]\n\n if orid in netmag_dict:\n events[i] = {}\n for ef in element_fields:\n # Parse values\n if ef is 'local_timestring' or ef is 'utc_timestring' or ef is 'time':\n value = dbgetv(db, 'time')[0]\n difference = float(now) - float(value)\n if difference < 6 * 3600:\n color = 'red'\n elif difference < 12 * 3600:\n color = 'orange'\n elif difference < 24 * 3600:\n color = 'yellow'\n elif difference < 72 * 3600:\n color = 'chartreuse'\n elif difference < 168 * 3600:\n color = 'blue'\n else:\n color = 'grey'\n events[i]['color'] = color\n elif ef is 'depth':\n value = dbgetv(db, 'depth')[0]\n elif ef is 'auth':\n value = dbgetv(db, 'auth')[0]\n elif ef is 'magnitude':\n # Magnitude\n # mlval, mbval, msval, magnitudeval, magtypeval = db.getv('ml', 'mb', 'ms', 'magnitude', 'magtype')\n # Null magnitude is -999.00\n magnitudeval = netmag_dict[orid]['magnitude']\n magtypeval = netmag_dict[orid]['magtype']\n if int(magnitudeval) > 0:\n scale = magtypeval\n value = '%.1f' % magnitudeval\n else:\n scale = ''\n value = 'N/A'\n events[i]['scale'] = scale\n else:\n value = dbgetv(db, ef)\n\n # Override formatting for specific fields\n if ef is 'lat' or 
ef is 'lon':\n value = '%.4f' % value\n elif ef is 'local_timestring':\n value = epoch2str( value, \"%H:%M:%S %Z %A %B %o, %Y\", \"US/Pacific\" )\n elif ef is 'utc_timestring':\n value = epoch2str( value, \"%H:%M:%S UTC %A %B %o, %Y\" )\n events[i][ef] = value\n\n full_lat, full_lon = db.getv('lat', 'lon')\n events[i]['grname'] = (grname(full_lat,full_lon)).title()\n events[i]['srname'] = (srname(full_lat,full_lon)).title()\n\n event_dict['events'] = events\n\n # Dump JSON file\n f = open(bufferfile, 'w') \n json.dump(event_dict, f, sort_keys=True, indent=2)\n f.flush()\n\n # Move the file to replace the older one\n try:\n os.rename(bufferfile, finalfile)\n except OSError:\n print \"Cannot rename JSON file from %s to %s\" % (bufferfile,finalfile)\n\n if verbose:\n print \"End: Creating main JSON file '%s' for all stations %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n db.close()\n return 0", "def main():\n with requests.get(API_URL) as response:\n response.raise_for_status()\n data = response.json()\n\n title = data[\"title\"]\n extract = data[\"extract\"]\n\n click.secho(title, fg=\"green\")\n click.echo(textwrap.fill(extract))", "def main():\n feed_url = ( len(sys.argv) > 2 ) and sys.argv[2] or FEED_URL\n\n f = AmazonAdFeed(feed_url)\n f.STATE_FN = 'link_amazon_ads_state'\n \n if len(sys.argv) > 1 and sys.argv[1] == 'rss':\n print f.scrape_rss()\n else:\n print f.scrape_atom()", "def sources(self):\n raise NotImplementedError()", "def run(self) -> None:\n self.urls_list = self._create_api_ulr_list()\n self.results = self._sort_results(\n AsyncGetAPI(\n self.urls_list, self.threads, max_requests=self.max_requests\n ).results\n )", "def main(args):\n\n #gets urls based on sections and creates basic directories\n stack_exchange_data = get_data(args.filename)\n zip_directory, corpus_directory = args.zip_path, args.dest_path\n setup(zip_directory, corpus_directory)\n\n for (section, url) in stack_exchange_data:\n #creates directories for the current SE site\n zip_file_path, unzipped_folder, corpus_section_directory = section_setup(\n section, zip_directory, corpus_directory)\n\n done_signal_path = os.path.join(corpus_section_directory, \".done\")\n if os.path.isfile(done_signal_path):\n continue\n\n print(\"Starting \" + section)\n\n #downloads and unzips data release for a site\n load(url, zip_file_path, unzipped_folder)\n\n #gets the links data from the links table for the site\n links = get_links(unzipped_folder)\n\n #gets post data from the posts table\n posts = get_posts(unzipped_folder)\n\n #gets post history\n posthistory = get_post_history(unzipped_folder)\n\n #creates the clusters of related and duplicate posts for a site,\n #based on links data\n # clusters, related, duplicates, unique_posts = gen_clusters(links)\n clusters = iter_clusters(links, posts, posthistory)\n\n #writes cluster information to json files\n write_json_files(clusters, corpus_section_directory)\n \n # put completion marker in folder so we can skip it next time\n with open(done_signal_path, \"w\") as f:\n print(\"\", file=f)\n\n print(\"Completed \" + section)", "def __get_sources__(self):\n\n # Let's go to the Apt temporal dir.\n os.chdir(self.conf['AptTmp'])\n\n # Define a global Source file, all the *_Sources files are going to be in this file.\n global_sources_file = open(self.conf['CodeName'] + '_Sources', 'w')\n\n\t\t# The main/debian-installer is in main, so remove it.\n\t\tcomponents = self.conf['Components']\n\t\tif 'main/debian-installer' in 
components:\n\t\t\tcomponents.remove('main/debian-installer')\n\n # For every component defined...\n for component in components:\n # Download the Packages.gz file\n file = self.__get_packages_file__(self.conf[\"Mirror\"], \\\n \"%s_%s_Sources\" % (self.conf['CodeName'], component), \\\n component, \"source\" + \"/Sources.gz\")\n\n # \"cat\" it into the global_packages_file\n for line in file:\n print >>global_sources_file, line,\n file.close()\n\n\t\tglobal_sources_file.close()\n\t\treturn open(self.conf['CodeName'] + '_Sources', 'r')", "def main():\n app = QtWidgets.QApplication(sys.argv)\n\n dataSource = DataSource(\"https://my-json-server.typicode.com/ochkarik05/jsonservers/videos\")\n\n player = Player()\n\n playList = PlayList(player)\n\n playListScanner = PlayListScanner(dataSource)\n\n playListScanner.dataLoaded.connect(playList.set_track_list)\n\n player.trackFinished.connect(playList.next)\n\n gui = MainWindow(player)\n\n gui.show()\n gui.resize(640, 480)\n\n gui.setWindowState(Qt.WindowFullScreen)\n sys.exit(app.exec_())", "def ingest_data(args):\n fetchopts = {\n \"fixtures\": FIXTURES,\n \"startyear\": args.start_year or fetch.STARTYEAR,\n \"endyear\": args.end_year or fetch.ENDYEAR\n }\n\n folder, num_series = fetch.fetch_all(**fetchopts)\n\n fcsv, num_rows = wrangle.wrangle_csv()\n fjson, _ = wrangle.wrangle_json()\n\n return (\n \"Ingested %i rows in %i time series to %s\\n\"\n \"Wrote JSON data to %s\\n\"\n \"Wrote CSV data to %s\"\n ) % (num_rows, num_series, folder, fcsv, fjson)", "def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.creators.com/comics/cat-seeall.html', session, res)\n save_result(res, json_file)", "def _read_sources_json(self) -> dict:\n df = pd.read_json(self.file_path, orient=\"index\")\n sources_info = {}\n for k in df:\n if any(df[k].apply(lambda x: isinstance(x, dict))):\n continue\n v = df[k].unique()\n sources_info[k] = v[0] if len(v) == 1 else v\n sources_info[\"path\"] = self.file_path\n return sources_info", "def feed(self) -> None:" ]
[ "0.67541414", "0.6721273", "0.66271365", "0.639896", "0.6346963", "0.6091228", "0.60303104", "0.5948088", "0.594113", "0.58209115", "0.5778367", "0.5776951", "0.5746344", "0.5687336", "0.56842184", "0.56672835", "0.5662965", "0.5631256", "0.5610598", "0.5607907", "0.5564241", "0.5558169", "0.555387", "0.55468404", "0.5534471", "0.5529407", "0.5518873", "0.5510035", "0.55089104", "0.55056435", "0.54882765", "0.5482794", "0.54744774", "0.54727525", "0.54590523", "0.54510945", "0.5440066", "0.54380846", "0.5436803", "0.5424846", "0.5390897", "0.5375267", "0.53749543", "0.53730714", "0.53674173", "0.5367084", "0.53661853", "0.5366092", "0.5361901", "0.5348328", "0.53366476", "0.53348047", "0.53347963", "0.53242177", "0.5323267", "0.53223443", "0.531804", "0.53159815", "0.53080374", "0.5300105", "0.52991575", "0.529686", "0.52917755", "0.52900344", "0.5288924", "0.5286745", "0.5282433", "0.52772963", "0.5277149", "0.52737755", "0.52734977", "0.52640885", "0.52636623", "0.52613693", "0.5257988", "0.5256545", "0.52562785", "0.52555394", "0.52494556", "0.52473044", "0.5240561", "0.5237959", "0.52373415", "0.5235121", "0.52339625", "0.52332896", "0.52293867", "0.52168787", "0.5214841", "0.52113575", "0.5210772", "0.520534", "0.5201886", "0.5201114", "0.52009225", "0.51923805", "0.51920635", "0.5191094", "0.51899093", "0.5189492" ]
0.6417497
3
Collect characters while within a source record
def characters(self, content):
    if self.in_source:
        self.chars += content
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def characters(self, data):\n pass", "def cleanup_raw_data(buf):\n raw = str(buf, encoding='iso-8859-1').strip()\n records = raw.splitlines()\n return records", "def extractCharacters(self):\n \n length, high=self.getSize() ##geting size of LineFrame object - high and length\n vHisto = self.vLinesHistogram()\n spaceLength = findSpaceLength(vHisto,high) ##finding of expected length of Space in line\n position = 0 ##position, from where findChar is serching for character\n Line=[] ##list of words in line\n Word=[] ##list of characters in word\n correction=0\n End = False\n while not End: ##while not End of the line, search for characters\n position, char, correction = self.findChar(position, spaceLength+correction)\n if type(char) == str: #check if returned CharFrame object or repor\n if char == \"Space\": #Space was finded in line, end of word, Word list append to Line list, and new Word list started\n Line.append(Word)\n Word=[]\n elif char == \"Enter\": ##Finden end of line, Wor list closed and appened to Line list, end of method, returned Line list\n Line.append(Word)\n #for i in range(0,len(Line)):\n #for j in range(0, len(Line[i])):\n #Line[i][j].savePicture(str(i)+\"kafel\"+str(j)+\".bmp\",\"BMP\")\n return Line\n else: ## Character finden in line, append CharFrame object to Word list\n Word.append(char)", "def read_chars(self):\n char_data = []\n for word in self.train_data:\n if word == self.eos or word == self.sos:\n continue\n if self.unit == \"oracle\":\n if '+' in word:\n tags = word.split('+')\n word_tag = tags[0].split(':')\n word = word_tag[1]\n if self.unit == \"morpheme\":\n word = re.sub(\"@@\", \"\", word)\n char_data.extend([ch for ch in word])\n return char_data", "def _read_characters(self):\n\n # Read the character information table\n for c in range(self.smallest_character_code, self.largest_character_code + 1):\n self._process_char(c)", "def parseC(self, field, data):\r\n return str(data.rstrip(b'\\0 '), self.encoding, errors='replace')", "def _translate_string(self, data):\n data = data.encode('iso-8859-1', errors='replace')\n\n for index, char in enumerate(data):\n yield self._meta.characters - 1 - self._ct[char]", "def _characters(self):\n self.characters = list(\n set([item for sublist in self.grid for item in sublist])\n )\n return self.characters", "def _peek_char(self):\n if self.read_pos > self.length:\n return \"\"\n\n return self.data[self.read_pos]", "def characters(self, in_chars):\n self.char_buffer.append(in_chars)", "def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. 
For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()", "def read_until(self, chars):\n\n start_index = self.index\n\n while self.index < self.length and self.xtext[self.index] not in chars:\n self.index += 1\n\n assert self.index < self.length\n\n return self.xtext[start_index:self.index]", "def get_characters(self):\n return self.characters", "def dissect(self, text):", "def filter_record(self, record):\n if len(record) >= self.max_length:\n return record[:self.max_length]\n else:\n return record", "def _grab_unascii(self):\r\n unascii = \"\"\r\n while self._char != -1 and not self._char in \"\\x00\\t\\r\\n\":\r\n unascii += self._char\r\n self._get_char()\r\n return unascii", "def clean_substr(self, match_obj):\n x = MLStripper()\n x.feed(match_obj.group(1).strip())\n return x.get_fed_data()", "def _read_char(self):\n if self.read_pos >= len(self.data):\n self.char = \"\"\n else:\n self.char = self.data[self.read_pos]\n\n self.pos = self.read_pos\n self.read_pos += 1", "def buffer_before_token(self):\n r = \"\".join(i for i in map(lambda x: x.decode(\"utf-8\"), self.buffer))\n self.buffer = []\n return r", "def characters(self, content):\n if self._current_tag:\n self._buffer.append(content)", "def read_chars(self, snapshot: Bug, location: FileLocationRange) -> str:\n # logger.debug(\"Reading characters at %s in snapshot, %s\",\n # location, snapshot.name)\n filename = location.filename\n contents_file = self.read_file(snapshot, filename)\n\n start_at = self.line_col_to_offset(snapshot,\n filename,\n location.start.line,\n location.start.column)\n stop_at = self.line_col_to_offset(snapshot,\n filename,\n location.stop.line,\n location.stop.column)\n\n contents = contents_file[start_at:stop_at + 1]\n # logger.debug(\"Read characters at %s in snapshot, %s: %s\",\n # location, snapshot.name, contents)\n return contents", "def _char_data_handler(data):\r\n current.text = data", "def filter_chars(accepted_chars,target):\n while True:\n c = (yield)\n if c.lower() in accepted_chars:\n target.send(c.lower())", "def _parse_till_unescaped_char(stream, chars):\n rv = \"\"\n while True:\n escaped = False\n for c in chars:\n if EscapeCharToken.starts_here(stream, c):\n rv += stream.next() + stream.next()\n escaped = True\n if not escaped:\n c = stream.next()\n if c in chars: break\n rv += c\n return rv, c", "def prepseq(self, seq):\n\n wtf = re.sub(r'\\*$', '', seq)\n return wtf", "def char_to_seq( self, uchar ):\n\t\t\n\t\tlstParts = self._char39[ uchar ].split( '+' ) # [ 'nb', 'ns', 'nb', ... 
]\n\t\t# Force evaluation with globals definition and local object definition (say on self.x)\n\t\treturn [ eval( '_'+code, globals(), self.__dict__ ) for code in lstParts ]", "def decode_fn(s_in):\r\n s_out = []\r\n for w in s_in:\r\n if w == '<s>':\r\n continue\r\n elif w=='</s>':\r\n break\r\n s_out.append(w)\r\n s_out = ' '.join(s_out)\r\n return s_out", "def handle_charref(self, number):\n codepoint = int(number[1:], 16) if number[0] in ('x', 'X') else int(number)\n text = six.unichr(codepoint)\n self.result.append(text)\n return text", "def extract_data(self):\r\n self.parse()\r\n lst = []\r\n for i in self.table.text.split(\"\\n\")[3:]:\r\n if i != \"\" and bool(re.search(r'\\d', i)):\r\n lst.append(i.replace(u'\\xa0', ''))\r\n single = lst.pop(-3)\r\n lst = [i + \" \" + j for i, j in zip(lst[::2], lst[1::2])]\r\n lst.append(single)\r\n return lst[0:28]", "def gen_chars(self, lines_str_list):\n char_index_counter = 0\n chars = VGroup()\n for line_no in range(lines_str_list.__len__()):\n chars.add(VGroup())\n chars[line_no].add(\n *self.lines_text.chars[\n char_index_counter : char_index_counter\n + lines_str_list[line_no].__len__()\n + 1\n ]\n )\n char_index_counter += lines_str_list[line_no].__len__() + 1\n return chars", "def _process_infores(source: str) -> str:\n # don't touch something that already looks like an infores CURIE\n if source.startswith(\"infores:\"):\n return source\n\n if self.filter:\n infores = self.filter.sub(self.substr, source)\n else:\n infores = source\n infores = self.prefix + \" \" + infores\n infores = infores.strip()\n infores = infores.lower()\n infores = re.sub(r\"\\s+\", \"_\", infores)\n infores = re.sub(r\"\\.+\", \"_\", infores)\n infores = re.sub(r\"[\\W]\", \"\", infores)\n infores = re.sub(r\"_\", \"-\", infores)\n\n infores = \"infores:\" + infores\n return infores", "def sliptText(text):\n\treturn [char for char in text]", "def __read_until(self, buffer, char, break_space=False):\n\t\tret = []\n\t\ttoken = buffer.read(1)\n\t\twhile token != char:\n\t\t\tif break_space and token.isspace():\n\t\t\t\treturn\n\t\t\tret.append(token)\n\t\t\ttoken = buffer.read(1)\n\t\t\tif not token:\n\t\t\t\tbreak\n\t\treturn \"\".join(ret)", "def characters(self, data):\n self._CharCache += data", "def __read_alpha(self, buffer):\n\t\tret = []\n\t\ttoken = buffer.read(1)\n\t\twhile token.isalpha():\n\t\t\tret.append(token)\n\t\t\ttoken = buffer.read(1)\n\t\tif token:\n\t\t\tbuffer.seek(-1, SEEK_CUR)\n\t\treturn \"\".join(ret)", "def lines_from_char(character):\n query = f\"\"\"\nSELECT script_l FROM script\nJOIN characters \nON characters.char_id = script.characters_char_id\nWHERE name = '{character}'\n\"\"\"\n data = pd.read_sql_query(query,engine)\n return data.to_json(orient=\"records\")", "def strings(filename, min_length=4):\n with open(filename, errors=\"ignore\") as f:\n result = \"\"\n\n for c in f.read():\n if c in string.printable:\n result += c\n continue\n\n if len(result) >= min_length:\n yield result\n\n result = \"\"\n\n if len(result) >= min_length: # catch result at EOF\n yield result", "def handle_charref(self,name):\r\n self.handle_data(unichr(int(name)))\r\n #self.handle_data(\"(charref %s)\" % name)\r\n #print \"handle_charref\", name\r\n #raise NotImplemented\r", "def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = 
str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text", "def scan(self):\n self.tokfile = open(self.tokfile_path, 'w')\n word = ''\n for line in open(self.srcfile):\n for ch in line:\n if ch in alphanum: \n word += ch\n else:\n if word:\n try:\n self.print_tok('$int', int(word))\n except ValueError:\n if word in self.reserved: \n self.print_tok('$' + word)\n else:\n self.print_tok('$id', word)\n if ch in special:\n self.print_tok(ch)\n word = ''\n self.tokfile.close()", "def extract_chars(infile, n=10000):\n reader = partial(get_chars, n)\n return read_on(reader, infile)", "def record_finder(lines):\n for line in lines:\n if not line.strip():\n continue\n if line.startswith(\"@\"): #separate each block of reads by @\n try:\n yield curr\n except:\n pass\n curr = []\n curr.append(line.strip())\n else:\n curr.append(line.strip())\n if curr:\n yield curr #Sandra et al. 2019", "def transform_seq(seq):\n # TODO add character checking based on ASCII code\n return \"\".join(\"\" if aa in msa_characters else aa for aa in seq)", "def extractData():\n for line in src:\n line = str(line) # now each line is a string\n line = line.rstrip()\n if '%%' in line: #header\n yield line\n for desired in desStr:\n if desired in line:\n yield line", "def _extract_kiss_text(self, raw_slice):\n self.text = self.frame[raw_slice + 3:]", "def tokenize_chars(line):\n return", "def preprocess_char(self):\n self.char_to_id, self.unk_char_list = self.build_vocab(mode=\"char\")\n self.subword_vocab_size = len(self.char_to_id)\n with open(self.sub_vocab_file, 'wb') as f:\n pickle.dump((self.char_to_id, self.unk_char_list, self.max_word_len), f)", "def check_chars_data_fields(header,\r\n mapping_data,\r\n warnings):\r\n\r\n allowed_data_field_chars = \"+-%./ :,;_\" + digits + letters\r\n allowed_sampleid_chars = \".\" + digits + letters\r\n correction = 1\r\n\r\n sample_id_field = \"SampleID\"\r\n fields_to_skip = [\"BarcodeSequence\", \"LinkerPrimerSequence\",\r\n \"ReversePrimer\"]\r\n\r\n for curr_field in range(len(header)):\r\n if header[curr_field] in fields_to_skip:\r\n continue\r\n if header[curr_field] == sample_id_field:\r\n valid_chars = allowed_sampleid_chars\r\n else:\r\n valid_chars = allowed_data_field_chars\r\n for curr_data in range(len(mapping_data)):\r\n # Need to skip newline characters\r\n curr_cell = mapping_data[curr_data][curr_field].replace('\\n', '')\r\n for curr_char in curr_cell:\r\n if curr_char not in valid_chars:\r\n warnings.append(\"Invalid characters found in %s\\t%d,%d\" %\r\n (mapping_data[\r\n curr_data][curr_field].replace(\r\n '\\n', ''),\r\n curr_data + correction, curr_field))\r\n break\r\n\r\n return warnings", "def _create_char_spinner():\r\n while True:\r\n for c in '|/-\\\\':\r\n yield c", "def char_encoding(self, data):\n _buffer = list()\n for word in data:\n chars = self.word_to_chars(word)\n _buffer.append(self.padding(chars, self.max_word_len, self.char_to_id[\"<PAD>\"]))\n return _buffer", "def get_char(self):\n for i in range(1,5):\n c = self.file[self.index:self.index+i] \n try:\n u = c.decode(self.encoding)\n break\n except UnicodeDecodeError:\n if i==4:\n raise \n\n self.index+=i\n\n if self.index > len(self.file):\n self.eof=True\n return None\n\n if u =='\\n':\n self.line +=1\n self.column = 0\n else:\n self.column+=1\n\n return u", "def preprocess_msg(self):\n self.tmp_msg = self.tmp_msg.lower()\n 
cleared = ''\n for ch in self.tmp_msg:\n if ch in string.ascii_lowercase:\n cleared += ch\n\n c = ''\n for ch in cleared:\n c += '{:02d}'.format(ord(ch) - 97)\n if len(c) % 4 != 0:\n c += '99'\n self.tmp_msg = c\n\n super().preprocess_msg()", "def out2inp(iterable,lookFor='CHARMM>',CC='!',maxDrought=1000):\n n = 0\n for line in iterable:\n line = line.strip()\n if line.startswith(lookFor):\n line = line.split(lookFor)[1]\n line = line.split(CC)[0]\n if line:\n yield line\n n = 0\n else:\n n += 1\n if n > maxDrought:\n return", "def chars(count):\n\n global offset\n\n bytes=midifile[offset:offset+count]\n offset+=count\n return bytes", "def _full_content():\n return string.ascii_letters", "def advent_9b(file_name):\n with open(file_name) as input_file:\n line = input_file.readline()\n garbage_chars = []\n get_group(line, 0, 1, [], garbage_chars)\n return len(garbage_chars)", "def get_filtered_record(csv_fname):\n with open(csv_fname, \"r\") as student_records:\n for student_record in csv.reader(student_records):\n converted = [re.sub('[^a-z0-9]+', '', x.lower()) for x in student_record]\n yield converted", "def repeated_concat():\n # Strings are inmutable, so a new one must be created during each += op\n letters = ''\n with open('lorem.txt', 'rU') as document:\n start = time() # record the start time (in seconds)\n for c in document:\n if c.isalpha():\n letters += c\n end = time() # record the end time (in seconds)\n return (end - start)", "def _decode(self, obj, ctx):\n\n chrlist = []\n\n for ord_ in obj:\n if ord_ == 0xFFFF:\n break\n chrlist.append(get_chr(ord_))\n \n return ''.join(chrlist)", "def loop_escaped(val, c):\n if not val:\n val = ''\n val = as_unicode(val)\n rc = re.compile(r'([^%s\\\\]|\\\\.)*' % re.escape(c))\n pos = 0\n while pos < len(val):\n if val[pos] == c:\n pos += 1\n continue\n m = rc.match(val, pos)\n if not m:\n raise Exception('rx bug')\n pos = m.end()\n yield unescape(m.group(0))", "def parse_alpha_encoding(src: str) -> Problem:\n # remove comments\n src = list(filter(lambda l: l.strip().find(';') != 0 and l.strip() != '', src.splitlines()))\n\n # problem is only first two lines (anything afterwards is ignored)\n runs = src[0:2]\n return [\n [[1 + ord(c) - ord('A') for c in group] for group in l.split()] for l in runs\n ]", "def extract_characters(word):\n char_bbs = []\n column = 0\n char_start = -1\n while column < word.shape[1]:\n while not word[:, column].any():\n if char_start != -1:\n char_bbs.append(np.s_[:, char_start:column])\n char_start = -1\n column += 1\n if char_start == -1:\n char_start = column\n column += 1\n if char_start != -1:\n char_bbs.append(np.s_[:, char_start:column])\n return char_bbs", "def consume(self, data: Collection[tuple[SourceRange, str, Verdict]]) -> list[tuple[SourceRange, str, Verdict]]:\n if data:\n self.lines().extend(data)\n self.raw = '\\n'.join(s for _, s, _ in self.lines())\n self.extent = SourceRange.from_locations(self.lines()[0][0].start, self.lines()[-1][0].end)\n return []", "def _string_data(self, data):\n print(\"_string_data:\")\n string_to_print = []\n for i in data[::2]:\n string_to_print.append(chr(i))\n print(string_to_print)", "def parse(record):\n\n #Extract individual parts of the FASTA record\n\n identifier = record.id #The sequence's Id\n sequence = record.seq #The sequence itself\n sequence = sequence.upper() #Turns all the nucleotides to upper case\n\n return identifier, sequence", "def __init__ ( self , seq , pattern ):\n\t\tif pattern . 
search ( seq ):\n\t\t\tprint \" Warning : sequence contains illegal characters \"\n\t\tself . data = seq . upper ()", "def _read_chars(filename):\n with open(filename, \"r\") as f:\n return list(f.read())", "def structure_data(characters):\n # Ignore superfluous diachritics and optional symbol\n IGNORE = [\"ˈ\", \"ˌ\", \"'\", \"̪\", \"̞\", \"ˣ\", \"̯\", \"-\", \"(\", \")\", \"[\", \"]\"]\n out = []\n for i, c in enumerate(characters):\n if c in IGNORE:\n continue\n # \"ː\" should be part of the character (no space between), also account for variation\n # in vowel length character used\n elif c == \"ː\" or c == \":\":\n out[-1] += \"ː\"\n # Do not add the optional characters, either (In Finnish seems to just be a glottal stop)\n elif i > 0 and characters[i-1] == \"(\" and characters[i + 1] == \")\":\n continue\n else:\n out.append(c)\n\n return ' '.join(out)", "def _text(self, text: str) -> None:\n for character in text:\n self._push(character)", "def ring_characters(self):\n return self._charset", "def _process_char(self, c):\n\n width_index, height_index, depth_index, italic_index, tag, remainder = self._read_char_info(\n c)\n\n # Get the parameters in the corresponding tables\n if width_index != 0:\n width = self._read_fix_word_in_table(tables.width, width_index)\n else:\n width = 0\n # euex10 has this case\n # raise ValueError(\"Zero width character for character code %u\" % (c))\n\n if height_index != 0:\n height = self._read_fix_word_in_table(tables.height, height_index)\n else:\n height = 0\n\n if depth_index != 0:\n depth = self._read_fix_word_in_table(tables.depth, depth_index)\n else:\n depth = 0\n\n if italic_index != 0:\n italic_correction = self._read_fix_word_in_table(\n tables.italic_correction, italic_index)\n else:\n italic_correction = 0\n\n # Interpret the tag field\n lig_kern_program_index = None\n next_larger_char = None\n extensible_recipe = None\n if tag == LIG_TAG:\n lig_kern_program_index = remainder\n elif tag == LIST_TAG:\n next_larger_char = remainder\n elif tag == EXT_TAG:\n extensible_recipe = self._read_extensible_recipe(remainder)\n\n if extensible_recipe is not None:\n # Fixme: self registration ?\n TfmExtensibleChar(self.tfm,\n c,\n width,\n height,\n depth,\n italic_correction,\n extensible_recipe,\n lig_kern_program_index,\n next_larger_char)\n\n else:\n # Fixme: self registration ?\n TfmChar(self.tfm,\n c,\n width,\n height,\n depth,\n italic_correction,\n lig_kern_program_index,\n next_larger_char)", "def convert_ascii_field(string):\n values = []\n for codepoint in [s for s in string.split(DATA_FILE_CODEPOINT_SEPARATOR) if (s != DATA_FILE_VALUE_NOT_AVAILABLE) and (len(s) > 0)]:\n #if DATA_FILE_CODEPOINT_JOINER in codepoint:\n # values.append(u\"\".join([hex_to_unichr(c) for c in codepoint.split(DATA_FILE_CODEPOINT_JOINER)]))\n if (codepoint.startswith(DATA_FILE_ASCII_NUMERICAL_CODEPOINT_START)) or (codepoint.startswith(DATA_FILE_ASCII_UNICODE_CODEPOINT_START)):\n values.append(hex_to_unichr(codepoint))\n else:\n values.append(codepoint)\n return values", "def char_strip(self):\n\n if not self.file_list:\n self.print_to_log(\"No files fit parameters, exiting\")\n return None\n\n\n result = []\n\n #pass list of files, set to inplace, and byte mode\n fi = fileinput.FileInput(self.file_list,\n inplace=1,\n mode='U')\n fname = \"\"\n count = 0\n self.error = 0\n for line in fi:\n\n #create info for logging\n if fi.isfirstline():\n #skip for first file\n if fi.lineno() > 1:\n result.append(\"Processed %s replaced '%s' by '%s' a total of %s\" % (\n fname, 
self.char_to_strip, self.char_for_replace, str(count)))\n count = 0\n fname = fi.filename()\n ltemp = ''\n #test and replace\n for char in line:\n if char == self.char_to_strip:\n count += 1\n #if you need to handle occurrences in the batch file\n self.error = 1\n char = self.char_for_replace\n ltemp += char\n sys.stdout.write(ltemp)\n fname = fi.filename()\n #logging for last file\n result.append(\"Processed %s replaced '%s' by '%s' a total of %s\" % (\n fname, self.char_to_strip, self.char_for_replace, str(count)))\n fi.close()\n #write out to log\n for item in result:\n self.print_to_log(item)", "def recordval(record, key):\n return re.sub(r'\\s+', ' ', raw_recordval(record, key))", "def preprocess(self, text):\r\n return text", "def get_text(self):\n return self.text[:500]", "def process(self,line):\n\n pattern = re.compile(\"@.*?@\")\n matches = pattern.findall(line)\n for m in matches:\n replacement = r\"<small>{}</small>\".format(re.escape(m[1:-1]))\n line = pattern.sub(replacement,line)\n\n return line", "def _read(self, valid):\n start = self.pos\n while valid(self.char) and self.pos < self.length:\n self._read_char()\n\n return self.data[start : self.pos]", "def iter_raw_lexemes(self):\n yield \"abcde\"", "def test_character_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[1], 133)", "def find_unicode(self, modifier=4):\n wide = []\n matches = re.finditer(b'([\\x20-\\x7e]\\x00){' +\n str(modifier).encode('ascii') + b',}', self.buff)\n\n if matches:\n for m in matches:\n wide.append(m.group(0).decode('utf-16'))\n return wide", "def peekChar(self):\r\n\t\tif (self.index + 1) < len(self.source):\r\n\t\t\treturn self.source[self.index + 1]\r\n\t\telse:\r\n\t\t\treturn None", "def transform(self, src_record):\n src_record.colon = ':'\n src_record.space = ' '\n src_record.sep = ' - '\n src_record.prefix = ''\n return \\\n (self.color_levelname \\\n (skip_repeat_line1 \\\n (src_record)))", "def postprocess(self, text):\r\n return text", "def strip_other_charcter():\n pass", "def __iter__(self):\r\n with open(self.source, 'rb') as fin:\r\n for line in fin:\r\n yield to_unicode(line)", "def reparseText(parsed):\n out = []\n buffer = ''\n for type, data in parsed:\n if type is RAW:\n buffer += data\n else:\n if buffer:\n b = re.sub(r'\\s+', ' ' , buffer)\n out.append((RAW, b))\n buffer = ''\n out.append((type,data))\n if buffer:\n b = re.sub(r'\\s+', ' ' , buffer)\n out.append((RAW, b))\n return out", "def __init__(self, current_char, source):\n self.current_char = current_char\n self.source = source", "def read_all_status_characters(self):\n return self.STATUS_CHARACTERS", "def __filter( self, text ):\n return text", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 132)", "def characters(self, ch):\n self.characterElementIdx += 1\n if self.inIgnorableElement == 0:\n if self.characterElementIdx not in self.contentBitSet:\n return\n\n self.html += xmlEncode(str(ch))", "def characters(self, text):\n if text.isspace(): return\n text = str(text)\n if self.curelement == \"residue\":\n self.newresname = text\n elif self.curelement == \"atom\":\n self.newatomname = text\n elif self.curelement == \"useatomname\":\n self.oldatomname = text\n elif self.curelement == \"useresname\":\n self.oldresname = text", "def test_check_chars_data_fields(self):\r\n\r\n header =\\\r\n ['SampleID',\r\n 'BarcodeSequence',\r\n 'LinkerPrimerSequence',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', 's2_data']]\r\n 
warnings = []\r\n\r\n warnings = check_chars_data_fields(header, mapping_data, warnings)\r\n\r\n expected_warnings = ['Invalid characters found in s-1\\t1,0',\r\n 'Invalid characters found in s1&data\\t1,3']\r\n\r\n self.assertEqual(warnings, expected_warnings)", "def tokenize(src):\n\n pass", "def _parse_row(row: str):\n final_row = []\n for char in row:\n\n # any number N expands into N spaces\n if char in \"12345678\":\n for i in range(int(char)):\n final_row.append(EMPTY_SPACE)\n else:\n final_row.append(char)\n\n return final_row", "def handle_entityref(self, name):\n text = six.unichr(name2codepoint[name])\n self.result.append(text)\n return text", "def ped_line_reader(line):\n s = []\n for c in line:\n if c in '\\t\\n':\n yield ''.join(s)\n s = []\n continue\n\n s.append(c)", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 970)", "def alpha_chars (text):\n for letter in text:\n if letter.isalpha ():\n yield letter" ]
[ "0.63615435", "0.5877636", "0.564212", "0.5450437", "0.53484285", "0.5325434", "0.5275199", "0.52268296", "0.51683784", "0.51639074", "0.5162519", "0.5138824", "0.5114489", "0.51047087", "0.50685495", "0.50610465", "0.5054689", "0.5049034", "0.50311816", "0.50303805", "0.5023495", "0.502008", "0.5017473", "0.50035", "0.49966088", "0.49799478", "0.4971176", "0.4947623", "0.49475273", "0.49458337", "0.49440843", "0.4943475", "0.493214", "0.4930723", "0.49293813", "0.49191764", "0.49153122", "0.49048245", "0.4898939", "0.48951325", "0.48946643", "0.48924342", "0.48923615", "0.48897693", "0.4875503", "0.48736688", "0.48425615", "0.48372513", "0.4835612", "0.48327905", "0.48310572", "0.48243484", "0.48238495", "0.48183995", "0.48183247", "0.48146862", "0.48091248", "0.48033965", "0.4793728", "0.47763324", "0.476304", "0.47489148", "0.47394294", "0.4730322", "0.47271731", "0.47241026", "0.47226986", "0.47205874", "0.47187486", "0.47178757", "0.47172704", "0.47121885", "0.47107854", "0.47059283", "0.47047654", "0.4697982", "0.46978799", "0.46962422", "0.4691352", "0.46902043", "0.4679594", "0.46770802", "0.46736956", "0.4668667", "0.46682662", "0.46654892", "0.46631834", "0.46609813", "0.4650226", "0.4649572", "0.46480533", "0.4644558", "0.4643451", "0.46416402", "0.46415082", "0.4636963", "0.46365565", "0.46349493", "0.46345255", "0.46300223" ]
0.63518363
1
Restrict a value between a min and max.
def clamp(value, min_value, max_value):
    return max(min_value, min(value, max_value))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _limit(value, min_value, max_value):\n\n if value < min_value:\n return min_value\n if value > max_value:\n return max_value\n return value", "def range_limit(val, minv, maxv):\n\tif (val < minv):\n\t\tval = minv\n\telif (val > maxv):\n\t\tval = maxv\n\treturn val", "def clamp(num, min, max): \n if num < min:\n num = min\n elif num > max:\n num = max\n return num", "def clip(val, val_min, val_max):\n return min(val_max, max(val_min, val))", "def check_valid_range(val, max_val):\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val", "def clamp(min_value: float, max_value: float, value: float):\n\t\tvalue = min(value, max_value)\n\t\tvalue = max(value, min_value)\n\t\treturn value", "def constrain(inputVal, lower_limit, upper_limit):\n \n if (inputVal < lower_limit):\n return lower_limit\n elif (inputVal > upper_limit):\n return upper_limit\n else:\n return inputVal", "def clamp(self, value, minv, maxv):\n if value > maxv:\n return maxv\n if value < minv:\n return minv\n return value", "def rangeLimit(val, minv, maxv):\n\treturn range_limit(val, minv, maxv)", "def clamp(minimum, value, maximum):\n return max(minimum, min(maximum, value))", "def __limit_value(self, value, v_range):\n if np.isnan(value):\n print('Warning: trying to limit nan value in range {0}'.format(v_range))\n return value\n\n return np.min([v_range[1], np.max([value, v_range[0]])])", "def limitValue(self, value, lowerLimit, upperLimit):\n if value > upperLimit:\n return upperLimit\n elif value < lowerLimit:\n return lowerLimit\n else:\n return value", "def clamp(self, value, minVal, maxVal):\n if type(value) is type(\"string\"):\n return value\n if minVal != None and max != None:\n return max(min(value, maxVal), minVal)\n if minVal != None and maxVal == None:\n return max(value, minVal)\n if minVal == None and maxVal != None:\n return min(value, maxVal)\n return value", "def clamp(lower, value, upper):\n if lower > value:\n return lower\n if upper < value:\n return upper\n return value", "def clamp(value, minval, maxval):\n return sorted((minval, int(value), maxval))[1]", "def constrain(small, value, big):\n return min(max(value, small), big)", "def __validate(self, value: int, extend_range: bool):\n if extend_range:\n bottom, top = self.getRange()\n self.setRange(min(value, bottom), max(value, top))\n return numpy.clip(value, *self.getRange())", "def simplebounds(cls, val, lower, upper):\n if val < lower:\n val = lower\n if val > upper:\n val = upper\n return val", "def bounds(x, xMin, xMax):\n if (x < xMin):\n x = xMin\n elif (x > xMax):\n x = xMax\n return(x)", "def ImposeLimits(Val, MinVal, MaxVal):\n\tif MinVal < Val < MaxVal:\n\t\treturn Val\n\telif Val <= MinVal:\n\t\treturn MinVal\n\telif Val >= MaxVal:\n\t\treturn MaxVal", "def _value_in_bounds(self, val):\n val = self._stepped_value(val)\n\n if val <= self.valmin:\n if not self.closedmin:\n return\n val = self.valmin\n elif val >= self.valmax:\n if not self.closedmax:\n return\n val = self.valmax\n\n if self.slidermin is not None and val <= self.slidermin.val:\n if not self.closedmin:\n return\n val = self.slidermin.val\n\n if self.slidermax is not None and val >= self.slidermax.val:\n if not self.closedmax:\n return\n val = self.slidermax.val\n return val", "def clip(x, min, max):\r\n # see decorator for function body\r\n # for grep: clamp, bound\r", "def clamp(num,start,end):\n if num >= start and num <= end: return num\n elif num < start: return start\n elif num > end: return end", "def clamp(value, mn, mx):\n\n 
return max(min(value, mx), mn)", "def _bound(x, min_value, max_value):\n return np.maximum(min_value, np.minimum(x, max_value))", "def between(min, max):\n def func(x):\n return min <= x <= max\n return func", "def clamp(value, mini, maxi):\n if value < mini:\n return mini\n elif maxi < value:\n return maxi\n else:\n return value", "def clamp(n, min_, max_):\n return max(min(max_,n),min_)", "def min_max(obj, val, is_max):\n n = getattr(obj, 'maximum' if is_max else 'minimum', None)\n if n == None:\n return\n\n _eq = getattr(obj, 'exclusiveMaximum' if is_max else 'exclusiveMinimum', False)\n if is_max:\n to_raise = val >= n if _eq else val > n\n else:\n to_raise = val <= n if _eq else val < n\n\n if to_raise:\n raise ValidationError('condition failed: {0}, v:{1} compared to o:{2}'.format('maximum' if is_max else 'minimum', val, n))", "def isInRange(val, minv, maxv):\n\treturn val >= minv and val <= maxv", "def clamp(minimum, n, maximum):\n return max(minimum, min(n, maximum))", "def check_range(number: object, min_r: float, max_r: float, name: str = \"\") -> float:\n if not isinstance(number, (float, int)):\n raise FFmpegNormalizeError(f\"{name} must be an int or float\")\n if number < min_r or number > max_r:\n raise FFmpegNormalizeError(f\"{name} must be within [{min_r},{max_r}]\")\n return number", "def Clamp(val, min, max):\n\tval = float(val)\n\tmin = float(min)\n\tmax = float(max)\n\n\tif val < min:\n\t\treturn min\n\telif val > max:\n\t\treturn max\n\telse:\n\t\treturn val", "def is_valid(self, value: int) -> bool:\n return value < self.min_value or value > self.max_value", "def sanitize(cls, value):\n return cls._get_value_inside_range(value, cls.MIN_VALUE,\n cls.MAX_VALUE, cls.DEFAULT_VALUE)", "def sanitize(cls, value):\n return cls._get_value_inside_range(value, cls.MIN_VALUE,\n cls.MAX_VALUE, cls.DEFAULT_VALUE)", "def sanitize(cls, value):\n return cls._get_value_inside_range(value, cls.MIN_VALUE,\n cls.MAX_VALUE, cls.DEFAULT_VALUE)", "def lim(cls, lower=PWM_MIN, upper=PWM_MAX, value=None, less_than_lower_default=None, greater_than_upper_default=None):\n #Sanitise inputs\n if less_than_lower_default is None:\n less_than_lower_default = lower\n if greater_than_upper_default is None:\n greater_than_upper_default = upper\n if not (less_than_lower_default >= lower and greater_than_upper_default <= upper):\n raise Exception(\"LEDStrip.lim(): Defaults %s,%s are not within %s - %s\" % (less_than_lower_default, greater_than_upper_default, lower, upper)) \n if value is None:\n return less_than_lower_default\n \n #Test values\n try:\n if value < lower:\n logging.warn(\" LEDStrip.lim(): Value %s is less than lower limit %s. Setting to %s.\" % (value, lower, less_than_lower_default))\n return float(less_than_lower_default)\n if value > upper:\n logging.warn(\" LEDStrip.lim(): Value %s is greater than upper limit %s. 
Setting to %s\" % (value, upper, greater_than_upper_default))\n return float(greater_than_upper_default)\n except (ValueError, TypeError, AttributeError):\n return float(less_than_lower_default)\n return float(value)", "def clamp(x: float, min_x: float, max_x: float) -> float:\n if x < min_x:\n return min_x\n elif x > max_x:\n return max_x\n return x", "def view_limits(self, dmin, dmax):\n base = self._select_base(dmin, dmax)\n if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n vmin = base.le(dmin)\n vmax = base.ge(dmax)\n if vmin == vmax:\n vmin -= 1\n vmax += 1\n else:\n vmin = dmin\n vmax = dmax\n\n return mtransforms.nonsingular(vmin, vmax)", "def clip(x, min_value, max_value):\n if max_value is None:\n max_value = np.inf\n if min_value is None:\n min_value = -np.inf\n max_value = C.maximum(min_value, max_value)\n return C.clip(x, min_value, max_value)", "def int_lim(cls, lower=PWM_MIN, upper=PWM_MAX, value=None, less_than_lower_default=None, greater_than_upper_default=None):\n out_float = cls.lim(lower, upper, value, less_than_lower_default, greater_than_upper_default)\n return int(round(out_float))", "def assert_between(value, minval, maxval):\n assert_greater_equal(value, minval)\n assert_less_equal(value, maxval)", "def constrain(amt,low,high):\n if amt < low:\n return low\n elif amt > high:\n return high\n else:\n return amt", "def clip(self, x):\n return self.min_value if x<self.min_value else self.max_value if x > self.max_value else x", "def clamp(n, minn, maxn):\n return max(min(maxn, n), minn)", "def clip_to_output_limits(self, value):\n return max(self.out_min, min(self.out_max, value))", "def constrain(amt, low, high):\n if amt < low:\n return low\n elif amt > high:\n return high\n else:\n return amt", "def set_output_limits(self, min_value, max_value):\n self.out_min = min_value\n self.out_max = max_value\n if self.out_min > self.out_max:\n print(\"set_output_limits(): min must be smaller than max.\")\n self.iterm = self.clip_to_output_limits(self.iterm)\n self.output = self.clip_to_output_limits(self.output)", "def clamp(n, minn, maxn):\n return max(min(maxn, n), minn)", "def limit(val, arr):\n # Make copy\n new = np.array(val)\n extr = minmax(arr)\n # Enforce lower bound\n new = np.maximum(new, extr[0])\n # Enforce upper bound\n new = np.minimum(new, extr[1])\n return new", "def ge(value, limit):\n return value >= limit", "def rangeSample(val, minLim, maxLim):\n\tif val < minLim or val > maxLim:\n\t\tval = randint(minLim, maxLim)\n\treturn val", "def check_out_range(value, lim_1, lim_2):\n lo_lim = min(lim_1, lim_2)\n hi_lim = max(lim_1, lim_2)\n \n if (abs(value) > abs(hi_lim)) or (abs(value) < abs(lo_lim)):\n return True\n else:\n return False", "def pass_selection_val(self, val, val_min=None, val_max=None):\n if (val_min is not None) and (val_max is not None):\n return True if (val > val_min) and (val < val_max) else False\n elif (val_min is None) and (val_max is not None):\n # Check against val_max only. \n return True if (val < val_max) else False\n elif (val_min is not None) and (val_max is None):\n # Check against val_min only. 
\n return True if (val > val_min) else False\n else:\n msg = \"[WARNING] You performed a cut, but didn't specify any bounds.\"\n raise RuntimeWarning(msg)", "def check_in_range(value, lim_1, lim_2):\n lo_lim = min(lim_1, lim_2)\n hi_lim = max(lim_1, lim_2)\n \n if (abs(value) > abs(lo_lim)) and (abs(value) < abs(hi_lim)):\n return True\n else:\n return False", "def vc_clamp(x, lb, ub):\n\n y = min(x, ub)\n y = max(y, lb)\n\n return y", "def clip_range(x, xlim):\n return min([max([x, xlim[0]]), xlim[1]])", "def is_valid_paid_value_range(value):\n\n min_valid_payment = 1\n max_valid_payment = 1_000_000\n\n if not min_valid_payment <= value <= max_valid_payment:\n raise serializers.ValidationError(\n 'Valor no permitido, debe ser estar entre 1 y 1000000'\n )\n return value", "def is_constrained(value, min_acceptable=None, max_acceptable=None):\n if min_acceptable is not None and value < min_acceptable:\n return False\n if max_acceptable is not None and value > max_acceptable:\n return False\n return True", "def mapRange(num, min1, max1, min2, max2, clamp=True):\n if(clamp and num < min1):\n return min2\n if(clamp and num > max1):\n return max2\n\n num1 = (num - min1) / (max1 - min1)\n num2 = (num1 * (max2 - min2)) + min2\n return num2", "def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)", "def cpfclamp(f, min_, max_):\n return min(max(f, min_), max_)", "def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value", "def _validate(self, value, **options):\n\n # this is a workaround to get the correct values of accepted min and max in\n # case they are callable and producing different results on each call.\n current_values = dict()\n current_values[self.CURRENT_MAX_KEY] = None\n current_values[self.CURRENT_MIN_KEY] = None\n options[self.CURRENT_VALUE_KEY] = current_values\n try:\n super()._validate(value, **options)\n except (self.maximum_value_error, self.minimum_value_error):\n equality_min = ''\n equality_max = ''\n\n inclusive_maximum = options.get('inclusive_maximum')\n if inclusive_maximum is None:\n inclusive_maximum = self.inclusive_maximum\n\n inclusive_minimum = options.get('inclusive_minimum')\n if inclusive_minimum is None:\n inclusive_minimum = self.inclusive_minimum\n\n if inclusive_minimum is not False:\n equality_min = self.inclusive_minimum_value_message\n\n if inclusive_maximum is not False:\n equality_max = self.inclusive_maximum_value_message\n\n current_min = 
current_values.get(self.CURRENT_MIN_KEY)\n if current_min is None:\n current_min = self.accepted_minimum\n\n current_max = current_values.get(self.CURRENT_MAX_KEY)\n if current_max is None:\n current_max = self.accepted_maximum\n\n raise self.range_value_error(self.range_value_message.format(\n param_name=self._get_field_name(**options),\n lower=self._get_representation(current_min),\n upper=self._get_representation(current_max),\n or_equal_min=equality_min, or_equal_max=equality_max))", "def clip(value: float, low: float, high: float) -> float:\n if math.isnan(value):\n return value\n assert low <= high\n return max(low, min(high, value))", "def normalise_modular_range(value, min, max):\n return numpy.mod(value-min, max-min)+min", "def lt(value, limit):\n return value < limit", "def _min_in_bounds(self, min):\n if min <= self.valmin:\n if not self.closedmin:\n return self.val[0]\n min = self.valmin\n\n if min > self.val[1]:\n min = self.val[1]\n return self._stepped_value(min)", "def scale(value,rawmin=100941, rawmax=274919, rangemin=0, rangemax=100):\n\n # Convert the left range into a 0-1 range (float)\n valueScaled = float(value - rawmin) / float(rawmax - rawmin)\n\n # Convert the 0-1 range into a value in the right range.\n value = rangemin + (valueScaled * ((rangemax * 10) - rangemin))\n\n value = value // 10 * 10 // 10 # float to int\n\n return max(value, rangemin) # value must be greater or equal to rangemin", "def intrange(value, name=\"\", value_min=None, value_max=None, zero=False):\n value = __integer(value, \"%s value\" % name, False)\n if value_min is not None:\n value_min = __integer(value_min, \"minimal %s value\" % name, True)\n intvalue(value_min, name, True, True, True)\n if value_max is not None:\n value_max = __integer(value_max, \"maximal %s value\" % name, True)\n intvalue(value_max, name, True, True, True)\n if not zero:\n if value == 0:\n __ex(\"The %s value must not be zero.\" % name, False)\n if (value_min is not None) and (value_max is not None):\n if value_min > value_max:\n __ex(\"The maximal %s value must be greater than the minimal \"\n \"value.\" % name, False)\n if (value_min == value_max) and (value != value_min):\n __ex(\"The %s value can only be %s (depending on further range \"\n \"further range arguments).\" % (name, value_min), False)\n if (value < value_min) or (value > value_max):\n __ex(\"The %s value must be between %s and %s (depending on \"\n \"further range arguments).\" % (name, value_min, value_max),\n False)\n elif value_min is not None:\n if value < value_min:\n __ex(\"The %s value must not be less than %s.\" % (name, value_min),\n False)\n elif value_max is not None:\n if value > value_max:\n __ex(\"The %s value must not be greater than %s.\" %\n (name, value_max), False)", "def clamp(n: int, a: int, b: int):\n return min(max(n, a), b)", "def apply_bound(x, var_min, var_max):\n x.position = np.maximum(x.position, var_min)\n x.position = np.minimum(x.position, var_max)", "def pin_lim(cls, value):\n return cls.int_lim(lower=0, upper=27, value=value, less_than_lower_default=27, greater_than_upper_default=27)", "def __verify_range(value, minimum, maximum):\n if value in range(minimum, maximum):\n return True\n else:\n return False", "def test_threshold_range_a(self):\n code, out, err = self.t.runError(\"--threshold --max 3.1 --min 3.2\")\n self.assertIn(\"The min value must be lower than the max value.\", out)", "def check_value(self, name, min_int, max_int):\n while True:\n numb = input(f\"-- {name} : Entrez une valeur comprise \"\n f\"entre 
{min_int} et {max_int} : \")\n try:\n check = int(numb)\n if check == 99 or min_int <= check <= max_int:\n break\n except ValueError:\n pass\n return check", "def _check_value_range(self, key: str, value: Any):\n minValue = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"minValue\", None)\n maxValue = from_dot_notation(\n field=\".\".join([*self.parents, key]), obj=self.definition\n ).get(\"maxValue\", None)\n\n if minValue is not None and value < minValue:\n raise Exception(\n f\"Value for '{key}' is lower than the minimum value (value should be at least {minValue})\"\n )\n if maxValue is not None and value > maxValue:\n raise Exception(\n f\"Value for '{key}' is higher than the maximum value (value should not exceed {maxValue})\"\n )", "def __init__(__self__, *,\n max: pulumi.Input[int],\n min: pulumi.Input[int]):\n pulumi.set(__self__, \"max\", max)\n pulumi.set(__self__, \"min\", min)", "def __init__(__self__, *,\n max: pulumi.Input[int],\n min: pulumi.Input[int]):\n pulumi.set(__self__, \"max\", max)\n pulumi.set(__self__, \"min\", min)", "def is_valid_range(parser, arg, minimum=0, maximum=100):\n if arg < minimum:\n parser.error(\"%s < %s\", arg, minimum)\n else:\n if arg > maximum:\n parser.error(\"%s > %s\", arg, maximum)\n\n return arg", "def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)", "def clamp(self):\n self.threshold.data.clamp_(self.min_threshold)", "def testMinMax(self, value):\n\t\tif value > self.oldmax:\n\t\t\tself.oldmax = value\n\t\t\tself.maxBox.SetValue(str(value).encode('utf-8'))\n\t\telif value < self.oldmin:\n\t\t\tself.oldmin = value\n\t\t\tself.minBox.SetValue(str(value).encode('utf-8'))", "def between(x, val1, val2):\n\treturn max(val1, val2) > x > min(val1, val2)", "def map_to_range(val, old_min, old_max, new_min, new_max):\n return new_max - (val - old_min) * (new_max - new_min) / (old_max - old_min)", "def chkLimits(name, value, Min, Max, unit = 'V', Hex = False):\n\n #global Log\n if not Min < value < Max:\n if Hex:\n line = \"%s:0x%X OUT OF LIMITS (0x%X, 0x%X). Test Failed !\" %(name, value, Min, Max)\n else:\n line = \"%s:%F %s OUT OF LIMITS (%F, %f). Test Failed !\" %(name, value, unit, Min, Max)\n Log.logError(line)\n Err.bumpError()\n return False\n if Hex:\n Log.logText(' '+'%s:0x%X expected range from:0x%X To: 0x%X. Test PASS !'% (name, value, Min, Max))\n else:\n Log.logText(' '+'%s:%F %s expected range From:%F %s To: %F %s. 
Test PASS !'% (name, value, unit, Min,unit, Max, unit))\n return True", "def check_range_value(array, min_=None, max_=None):\n # check lowest and highest bounds\n if min_ is not None and array.min() < min_:\n raise ValueError(\"The array should have a lower bound of {0}, but its \"\n \"minimum value is {1}.\".format(min_, array.min()))\n if max_ is not None and array.max() > max_:\n raise ValueError(\"The array should have an upper bound of {0}, but \"\n \"its maximum value is {1}.\".format(max_, array.max()))\n\n return True", "def elevation_servo_set_min_max(self, min: int, max: int):\n self.elevation_servo.set_min_position(min)\n self.elevation_servo.set_max_position(max)", "def le(value, limit):\n return value <= limit", "def _validate_value(self, value):\n if self.limits[0] <= value <= self.limits[1]:\n return True\n else:\n return False", "def test_min_max_limiting() -> None:\n d = {\n \"one\": [-1, 0, 1],\n \"two\": [2, 3, -1],\n }\n # Update a single column\n df = pd.DataFrame(d)\n #\n # .loc accepts a boolean mask and set of columns to return.\n #\n df.loc[df[\"one\"] < 0, [\"one\"]] = 0\n #\n # one two\n # 0 2\n # 0 3\n # 1 -1\n #\n assert df.iloc[0, 0] == 0\n assert df.iloc[2, 1] == -1\n\n # You can use `clip` to enforce minimum and maximum values for an entire df.\n df = df.clip(lower=0.0)\n assert df.iloc[0, 0] == 0.0\n assert df.iloc[2, 1] == 0.0", "def __init__( # pylint: disable=too-many-arguments\n self,\n min: Optional[float] = None,\n max: Optional[float] = None,\n step: Optional[int] = None, # pylint: disable=redefined-outer-name\n include_min: bool = True,\n include_max: bool = True,\n ) -> None:\n #: The optional minimal allowed value.\n self.min = min\n\n #: The optional maximal allowed value.\n self.max = max\n\n #: The optional step between values.\n self.step = step\n\n #: Whether the minimal value is allowed.\n self.include_min = include_min\n\n #: Whether the maximal value is allowd.\n self.include_max = include_max", "def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)", "def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)", "def minmin_maxmax( *args ):\n rmin = min( [ mv.min() for mv in args ] )\n rmax = max( [ mv.max() for mv in args ] )\n rmv = cdms2.createVariable( [rmin,rmax] )\n return rmv", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)", "def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)" ]
[ "0.8242318", "0.82057476", "0.77939916", "0.7717922", "0.7658431", "0.76336455", "0.75694007", "0.7560079", "0.75334716", "0.7511125", "0.7503323", "0.7490816", "0.7443682", "0.7372782", "0.73508364", "0.7305762", "0.7283909", "0.7263027", "0.72585297", "0.72390145", "0.7189812", "0.71267784", "0.7126178", "0.71082735", "0.7078759", "0.7069717", "0.70644605", "0.7064237", "0.70349467", "0.7011921", "0.69800913", "0.69423175", "0.6918298", "0.69106966", "0.68916154", "0.68916154", "0.68916154", "0.6887795", "0.6869072", "0.68459266", "0.6845692", "0.68345624", "0.6811967", "0.68031645", "0.6791451", "0.6766357", "0.67646646", "0.67577314", "0.67263746", "0.67201823", "0.67013526", "0.66945153", "0.66791", "0.6654271", "0.664779", "0.6638729", "0.6633888", "0.66188085", "0.6605884", "0.660448", "0.65877527", "0.65854883", "0.6560133", "0.65501595", "0.654893", "0.65393853", "0.6531421", "0.652693", "0.6524714", "0.6516082", "0.65112925", "0.65111977", "0.65001523", "0.6493067", "0.6488919", "0.6486601", "0.64788646", "0.64787716", "0.647775", "0.647775", "0.64719975", "0.6444289", "0.6439807", "0.6428986", "0.64218354", "0.6413216", "0.64010924", "0.6380481", "0.63768035", "0.63713497", "0.6368426", "0.6362485", "0.6344793", "0.6340574", "0.6323823", "0.63180697", "0.63144886", "0.63144886", "0.63144886", "0.63144886" ]
0.78470093
2
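For illustration only (not part of the dataset record above): a minimal, self-contained sketch of how a clamp function like the one in the preceding document field behaves at, below, and above its bounds. The function body mirrors the record's document; the assertions are added here purely as a usage example.

def clamp(value, min_value, max_value):
    """Restrict value to the inclusive range [min_value, max_value]."""
    return max(min_value, min(value, max_value))

# In-range values pass through unchanged; out-of-range values snap to the nearest bound.
assert clamp(5, 0, 10) == 5    # inside the range
assert clamp(-3, 0, 10) == 0   # below the minimum
assert clamp(42, 0, 10) == 10  # above the maximum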
Create connection line constraint between item's handle and the port.
def constraint(self, item, handle, glue_item):
    start = MatrixProjection(self.start, glue_item.matrix_i2c)
    end = MatrixProjection(self.end, glue_item.matrix_i2c)
    point = MatrixProjection(handle.pos, item.matrix_i2c)
    cx = EqualsConstraint(point.x, start.x)
    cy = BetweenConstraint(point.y, start.y, end.y)
    return MultiConstraint(start, end, point, cx, cy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_fixed_distance_to_line_constraint():\n return FixedDistanceToLineConstraint()", "def test_connect(self):\n line, head = self._get_line()\n self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n self.assertTrue(cinfo is not None)\n self.assertEquals(self.box1, cinfo.connected)\n self.assertTrue(cinfo.port is self.box1.ports()[0],\n 'port %s' % cinfo.port)\n self.assertTrue(isinstance(cinfo.constraint, LineConstraint))\n # No default callback defined:\n self.assertTrue(cinfo.callback is None)\n\n line, head = self._get_line()\n self.tool.connect(line, head, (90, 50))\n cinfo2 = self.canvas.get_connection(head)\n self.assertTrue(cinfo is not cinfo2, cinfo2)\n self.assertTrue(cinfo2 is None, cinfo2)", "def test_item_and_port_glue(self):\n\n ports = self.box1.ports()\n\n # glue to port nw-ne\n sink = self.tool.glue(self.line, self.head, (120, 50))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[0], sink.port)\n\n # glue to port ne-se\n sink = self.tool.glue(self.line, self.head, (140, 70))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[1], sink.port)\n\n # glue to port se-sw\n sink = self.tool.glue(self.line, self.head, (120, 90))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[2], sink.port)\n\n # glue to port sw-nw\n sink = self.tool.glue(self.line, self.head, (100, 70))\n self.assertEquals(sink.item, self.box1)\n self.assertEquals(ports[3], sink.port)", "def ioLineDrag(self, startItem, pos0, pos1, done=False):\n assert isinstance(startItem, PortItem)\n assert isinstance(pos0, QPointF)\n assert isinstance(pos1, QPointF)\n assert isinstance(done, bool)\n\n if self._draggedLineItem is None:\n self._draggedLineItem = DraggedLineItem(pos0, pos1)\n self.addItem(self._draggedLineItem)\n else:\n self._draggedLineItem.setEndpoint(pos1)\n\n vaildItem = None\n\n if QLineF(pos0, pos1).length() > 5.0:\n # Check if line is over other PortItem\n for item in self.items(pos1):\n if isinstance(item, PortItem):\n vaildItem = item\n print item.name()\n break\n\n self._draggedLineItem.showEndpoint(vaildItem is not None)\n\n if done:\n self.removeItem(self._draggedLineItem)\n self._draggedLineItem = None\n\n if vaildItem is not None:\n # Request connection creation\n name1 = startItem.fullname()\n name2 = vaildItem.fullname()\n self.sigCreateConnection.emit(name1, name2)", "def test_port_create_with_binding_information(self):\n network, segments, subnets = self._create_test_segments_with_subnets(3)\n\n # Map the host to the middle segment (by mocking host/segment mapping)\n self._setup_host_mappings([\n (segments[1]['segment']['id'], 'fakehost'),\n (segments[1]['segment']['id'], 'otherhost'),\n (segments[0]['segment']['id'], 'thirdhost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n res = self.deserialize(self.fmt, response)\n self._validate_immediate_ip_allocation(res['port']['id'])\n\n # Since host mapped to middle segment, IP must come from middle subnet\n self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])", "def net_acl_iptables_rule(item):\n # defaults\n fmt = {\n 'chain': '-A INPUT',\n 'device': '',\n 'protocol': ' -p tcp',\n 'state': '',\n 'identifier': ' -m comment --comment \"20CACL {}\"'.format(item['name']),\n 'target': ' -j ACCEPT',\n }\n\n if item.get('device', None):\n fmt['device'] = ' -i 
{}'.format(item.device)\n if item.get('protocol', None):\n fmt['protocol'] = ' -p {}'.format(item.protocol)\n # FIXME parse for false\n if item.get('stateful', False) == True:\n fmt['state'] = ' -m state --state NEW'\n if not item.get('ports', None):\n raise ValueError(\"missing ports\")\n else:\n fmt['ports'] = ' -m multiport --dports={}'.format(','.join(map(str, item['ports'])))\n\n line = \"{chain}{device}{protocol}{state}{ports}{identifier}{target}\".format(**fmt)\n\n return line", "def test_reconnect_same(self):\n line, head = self._get_line()\n self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n item = cinfo.connected\n port = cinfo.port\n constraint = cinfo.constraint\n\n assert item == self.box1\n assert item != self.box2\n\n # connect to box1 again, handle's connected item and port should be\n # the same but connection constraint will differ\n connected = self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n self.assertEqual(self.box1, cinfo.connected)\n self.assertEqual(self.box1.ports()[0], cinfo.port)\n self.assertNotEqual(constraint, cinfo.constraint)", "def slot_constraint(self, item, role_spec):\n return self.kb.slot_value(\n logic.expr(item),\n CONSTRAINT_EXPR,\n logic.expr(role_spec))", "def __init__(self, srcNode, destNode):\r\n super(NodeConnection, self).__init__()\r\n \r\n self.setSrcNode(srcNode)\r\n self.setDestNode(destNode)\r\n \r\n self._srcPt = None\r\n self._destPt = None\r\n self.setArrowSize(10)\r\n \r\n self.Adjust()\r\n self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)", "def _set_constraint(self):\n pass", "def create_connection(\n self,\n from_id: str,\n to_id: str\n ):\n raise NotImplementedError", "def addConnection(self, port1Name, port2Name, connItem):\n assert isinstance(connItem, ConnectionItem)\n\n # Ensure port1Name and port2Name are str, not QString\n port1Name = str(port1Name)\n port2Name = str(port2Name)\n\n node1Name = port1Name.split(':')[0]\n node2Name = port2Name.split(':')[0]\n\n if node1Name == node2Name:\n return False\n\n node1 = self.nodeFromName(node1Name)\n node2 = self.nodeFromName(node2Name)\n\n if node1.isConnected(port1Name) or node2.isConnected(port2Name):\n return False\n\n self.addItem(connItem)\n node1.addConnection(port1Name, connItem)\n node2.addConnection(port2Name, connItem)\n\n assert connItem.startPortName() is not None\n assert connItem.endPortName() is not None\n return True", "def _setup_create_firewall_rule_with_all_params(self, protocol='tcp'):\r\n resource = 'firewall_rule'\r\n cmd = firewallrule.CreateFirewallRule(test_cli20.MyApp(sys.stdout),\r\n None)\r\n name = 'my-name'\r\n description = 'my-desc'\r\n source_ip = '192.168.1.0/24'\r\n destination_ip = '192.168.2.0/24'\r\n source_port = '0:65535'\r\n destination_port = '0:65535'\r\n action = 'allow'\r\n tenant_id = 'my-tenant'\r\n my_id = 'myid'\r\n args = ['--description', description,\r\n '--shared',\r\n '--protocol', protocol,\r\n '--source-ip-address', source_ip,\r\n '--destination-ip-address', destination_ip,\r\n '--source-port', source_port,\r\n '--destination-port', destination_port,\r\n '--action', action,\r\n '--enabled',\r\n '--admin-state-up',\r\n '--tenant-id', tenant_id]\r\n position_names = []\r\n position_values = []\r\n if protocol == 'any':\r\n protocol = None\r\n self._test_create_resource(resource, cmd, name, my_id, args,\r\n position_names, position_values,\r\n description=description, shared=True,\r\n 
protocol=protocol,\r\n source_ip_address=source_ip,\r\n destination_ip_address=destination_ip,\r\n source_port=source_port,\r\n destination_port=destination_port,\r\n action=action, enabled=True,\r\n tenant_id=tenant_id)", "def setup_rule(self, client):\n pass", "def setup_rule(self, client):\n pass", "def _add_line(self, key, info):\n info = copy.deepcopy(info)\n anticipated_bus = self._get_df_with_new_elements(\"bus\")\n new_lines = []\n required = {\"from_bus_id\", \"to_bus_id\"}\n xor_sets = {(\"capacity\", \"Pmax\"), (\"capacity\", \"Pmin\")}\n optional = {\"Pmin\"}\n for i, line in enumerate(info):\n self._check_entry_keys(line, i, key, required, xor_sets, optional)\n start = line[\"from_bus_id\"]\n end = line[\"to_bus_id\"]\n if start not in anticipated_bus.index:\n raise ValueError(\n \"No bus with the following id for line #%d: %d\" % (i + 1, start)\n )\n if end not in anticipated_bus.index:\n raise ValueError(\n \"No bus with the following id for line #%d: %d\" % (i + 1, end)\n )\n if start == end:\n raise ValueError(f\"to/from buses of line #{i + 1} must be different\")\n if \"capacity\" in line:\n if not isinstance(line[\"capacity\"], (int, float)):\n raise ValueError(\"'capacity' must be a number (int/float)\")\n if line[\"capacity\"] < 0:\n raise ValueError(\"capacity of line #%d must be positive\" % (i + 1))\n # Everything looks good, let's translate this to Pmin/Pmax\n line[\"Pmax\"] = line[\"capacity\"]\n line[\"Pmin\"] = -1 * line[\"capacity\"]\n del line[\"capacity\"]\n elif {\"Pmin\", \"Pmax\"} < set(line.keys()):\n if key == \"new_branch\":\n err_msg = \"Can't independently set Pmin & Pmax for AC branches\"\n raise ValueError(err_msg)\n for p in {\"Pmin\", \"Pmax\"}:\n if not isinstance(line[p], (int, float)):\n raise ValueError(f\"'{p}' must be a number (int/float)\")\n if line[\"Pmin\"] > line[\"Pmax\"]:\n raise ValueError(\"Pmin cannot be greater than Pmax\")\n else:\n raise ValueError(\"Must specify either 'capacity' or Pmin and Pmax\")\n if (\n key == \"new_branch\"\n and anticipated_bus.interconnect[start]\n != anticipated_bus.interconnect[end]\n ):\n raise ValueError(\n \"Buses of line #%d must be in same interconnect\" % (i + 1)\n )\n elif (\n anticipated_bus.lat[start] == anticipated_bus.lat[end]\n and anticipated_bus.lon[start] == anticipated_bus.lon[end]\n ):\n raise ValueError(\"Distance between buses of line #%d is 0\" % (i + 1))\n new_lines.append(line)\n\n if key not in self.ct:\n self.ct[key] = []\n self.ct[key] += new_lines", "def cmd_CONNECTION(self, line):\r\n config = ConnectionOptions(self.terminal)\r\n\r\n try:\r\n config.parseOptions(line)\r\n cmd = config.subCommand\r\n opts = config.subOptions if hasattr(config, 'subOptions') else {}\r\n except usage.UsageError as errortext:\r\n self.terminal.write(\"BUG in usage: {0}\".format(errortext))\r\n else:\r\n if cmd == 'add':\r\n if opts['tag1'] and opts['tag2']:\r\n self.callToUser('addConnection', 'robot', opts['tag1'],\r\n opts['tag2'])\r\n elif cmd == 'remove':\r\n if opts['tag1'] and opts['tag2']:\r\n self.callToUser('removeConnection', 'robot', opts['tag1'],\r\n opts['tag2'])", "def build_connection(self, src, tgt) -> NoReturn:\n # If src and tgt are the same node, src not in node_collection or\n # tgt not in node_collection,\n # then skip this edge.\n if src == tgt or src not in self._nodes_collection or tgt not in self._nodes_collection:\n if src.split(':')[0] not in self._nodes_collection:\n warnings.warn(f\"Graph construct a self-loop node {src}. 
Ignored.\")\n return\n\n if tgt not in self._nodes_collection[src.split(':')[0]].successor_nodes:\n self._nodes_collection[src.split(':')[0]].successor_nodes.append(tgt)\n if src not in self._nodes_collection[tgt].precursor_nodes:\n self._nodes_collection[tgt.split(':')[0]].precursor_nodes.append(src)", "def cmd_port (self, line):\r\n info = line[1].split (',')\r\n ip = '.'.join (info[:4])\r\n port = int(info[4])*256 + int(info[5])\r\n # how many data connections at a time?\r\n # I'm assuming one for now...\r\n # TODO: we should (optionally) verify that the\r\n # ip number belongs to the client. [wu-ftpd does this?]\r\n self.client_addr = (ip, port)\r\n self.respond ('200 PORT command successful.')", "def connect(self, handle: Handle, port: Port) -> bool:\n pin = self.pin\n if not pin.subject:\n pin.subject = pin.model.create(\n UML.InputPin if isinstance(pin, InputPinItem) else UML.OutputPin\n )\n\n assert isinstance(pin.subject, (UML.InputPin, UML.OutputPin))\n pin.subject.opaqueAction = self.action.subject\n\n # This raises the item in the item hierarchy\n pin.change_parent(self.action)\n\n return True", "def test_port_create_with_binding_and_no_subnets(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n res = self.deserialize(self.fmt, response)\n\n # No subnets, so no allocation. But, it shouldn't be an error.\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def _connection_maker(\n self,\n first_device,\n first_port,\n second_device,\n second_port):\n if first_port is None:\n return self.network.make_connection(\n first_device.id, None,\n second_device.id, second_port.id)\n else:\n return self.network.make_connection(\n first_device.id, first_port.id,\n second_device.id, second_port.id)", "def allow(self, handle, port):\n assert self.canvas\n\n line = self.line\n element = self.element\n\n # Check if no other items are connected\n connections = self.canvas.get_connections(connected=element)\n connected_items = [\n c\n for c in connections\n if isinstance(c.item, TransitionItem) and c.item is not line\n ]\n if handle is line.head and not any(connected_items):\n return super().allow(handle, port)\n else:\n return None", "def addConstraint(self, constraint: Constraint, /) -> None:\n ...", "def add_connection(self, switch_name, port1, port2, bidir=False):\n raise NotImplementedError()", "def createConstraint(*argv):", "def create_port_item(self, port_spec, is_connected, is_optional,\n is_visible, is_editable, parent=None):\n return PortItem(port_spec, is_connected, True, False, False, parent)", "def constraint(self, c):\n self.add_constraint(c)", "def create_port_forward_rule(self, ipaddressid, protocol, virtualmachineid,\n privateport, privateendport,\n publicport, publicendport): \n params = {'command':'createPortForwardingRule',\n 'ipaddressid':ipaddressid,\n 'protocol':protocol,\n 'privateport':privateport,\n 'privateendport':privateendport,\n 'publicport':publicport,\n 'publicendport':publicendport,\n 'virtualmachineid':virtualmachineid,\n 'openfirewall':False} \n\n try:\n response = self.send_request(params)\n res = 
json.loads(response)\n clsk_job_id = res['createportforwardingruleresponse']['jobid']\n self.logger.debug('Start job - createPortForwardingRule: %s' % res)\n return clsk_job_id\n except KeyError as ex:\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n raise ClskError(ex)", "def test_port_create_with_binding_information_fallback(self):\n with self.network() as network:\n with self.subnet(network=network,\n ip_version=constants.IP_VERSION_6,\n cidr='2001:db8:0:0::/64') as subnet:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n self._validate_l2_adjacency(network['network']['id'], is_adjacent=True)\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n\n res = self.deserialize(self.fmt, response)\n self._validate_immediate_ip_allocation(res['port']['id'])\n\n # Since the subnet is not on a segment, fall back to it\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])", "def creation_validation(openstack_resource):\n validate_resource_quota(openstack_resource, PORT_OPENSTACK_TYPE)\n ctx.logger.debug('OK: port configuration is valid')", "def validate_rule(self, client):\n raise NotImplementedError(\"Please fix me.\")", "def validate_rule(self, client):\n raise NotImplementedError(\"Please fix me.\")", "def test_add_outgoing_connection():\n\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n i = Intersection(center, radius, speed_limit)\n i2 = Intersection(center, radius, speed_limit)\n i2.add_connection(10.0, 20, 2, 2, 40, 'test2')\n\n start = Coordinates(1,1)\n end = Coordinates(7, 9)\n len = 15\n out_ln = 2\n in_ln = 1\n ang = 3 * math.pi / 2\n\n road = Road(start, end, len, out_ln, in_ln, ang, 20, 'Test')\n\n l = i.get_connections()\n\n assert not l\n\n i.add_outgoing_connection(road)\n\n assert l\n assert l[0].get_length() == 15\n\n l2 = i2.get_connections()\n\n assert l2\n\n i2.add_outgoing_connection(road)\n\n assert l2\n assert l2[1].get_length() == 15", "def add_link_capacity(self, path, bw):\n\n # PART 1, TASK 3.4 add bw to edges", "def create_lines(self) -> None:\n res = []\n for connection in self.connections:\n start_component = self.components[connection.start_entity]\n end_component = self.components[connection.end_entity]\n start_pin_location = (\n start_component.location\n + start_component.pin_locations[connection.start_pin]\n )\n end_pin_location = (\n end_component.location + end_component.pin_locations[connection.end_pin]\n )\n\n x_midpoint = (start_pin_location.x + end_pin_location.x) / 2\n bend_start = Point(x_midpoint, start_pin_location.y)\n bend_end = Point(x_midpoint, end_pin_location.y)\n bends = [bend_start, bend_end]\n res.append(Line(connection, start_pin_location, *bends, end_pin_location))\n\n self.lines = res", "def _create_port_ext(self, res_port, req_port, context):\n commit = self._get_port_attr(req_port, \"commit\")\n trunked = self._get_port_attr(req_port, \"trunked\")\n hardware_id = self._get_port_attr(req_port, \"switch:hardware_id\")\n if commit is None:\n commit = False\n port_ext = db.create_port_ext(\n port_id=res_port[\"id\"],\n commit=commit,\n trunked=trunked,\n hardware_id=hardware_id,\n session=context.session)\n return 
port_ext.as_dict()", "def _make_connection(self, src_var, target_comp, target_vname):\n src_comp = src_var.component\n target_var = self._find_or_create_variable(target_comp.name, target_vname, src_var)\n # Sanity check the target variable\n if (target_var.get_type() == VarTypes.Mapped\n and target_var.get_source_variable(recurse=True) is src_var.get_source_variable(recurse=True)):\n# print \"Connection exists between\", src_var, \"and target\", target_var\n return target_var\n elif target_var.get_type() == VarTypes.Unknown:\n # We've created this variable, so should be ok, but check for gotchas\n assert not(hasattr(target_var, u'initial_value'))\n if src_comp is target_comp.parent():\n src_if = u'private'\n target_if = u'public'\n elif src_comp.parent() is target_comp:\n src_if = u'public'\n target_if = u'private'\n else:\n assert src_comp.parent() is target_comp.parent()\n src_if = u'public'\n target_if = u'public'\n # One special case: if the src_var is actually obtained from a different\n # component at this level or above, in which case we should use the real\n # source, not that given.\n if getattr(src_var, src_if + u'_interface', u'none') == u'in':\n src_var = src_var.get_source_variable()\n # Check and set the interface attributes\n# print \"Connecting source\", src_var, src_if, getattr(src_var, src_if + u'_interface', u'none'),\n# print \"to\", target_var, target_if, getattr(target_var, target_if + u'_interface', u'none')\n assert getattr(src_var, src_if + u'_interface', u'none') != u'in'\n assert getattr(target_var, target_if + u'_interface', u'none') != u'out'\n src_var.xml_set_attribute((src_if + u'_interface', None), u'out')\n target_var.xml_set_attribute((target_if + u'_interface', None), u'in')\n # Create the connection element\n self._create_connection_element(src_var, target_var)\n self.connections_made.add(frozenset([src_var, target_var]))\n # Ensure we handle a later connection attempt between these variables correctly\n target_var._set_source_variable(src_var)\n else:\n # Naming conflict; try again with a different target name\n return self._make_connection(src_var, target_comp, target_vname + u'_')\n return target_var", "def generate_connection_i(self,N_e):\n raise NotImplementedError", "def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)", "def connect(self, socket_item, connection):\n # Populate connection.\n connection.socketItem = socket_item\n connection.plugNode = self.parentItem().name\n connection.plugAttr = self.attribute\n\n # Add socket to connected slots.\n if socket_item in self.connected_slots:\n self.connected_slots.remove(socket_item)\n self.connected_slots.append(socket_item)\n\n # Add connection.\n if connection not in self.connections:\n self.connections.append(connection)\n\n # Emit signal.\n nodzInst = self.scene().views()[0]\n\n nodzInst.portConnected(self.port(), socket_item.port())\n nodzInst.signal_PlugConnected.emit(connection.plugNode, connection.plugAttr, connection.socketNode, connection.socketAttr)", "def test_parseConnectionLine(self):\n fake_connection = \"5,3 2 4,3\"\n actual_result = rules.parseConnectionLine(fake_connection)\n self.assertEqual(actual_result.startX, 5)\n self.assertEqual(actual_result.startY, 3)\n self.assertEqual(actual_result.direction, 2)\n self.assertEqual(actual_result.endX, 4)\n self.assertEqual(actual_result.endY, 3)", "def test_commentline_item_with_no_subject_connect(create, diagram):\n comment = create(CommentItem, Comment)\n line = 
create(CommentLineItem)\n gi = create(GeneralizationItem)\n\n connect(line, line.head, comment)\n connect(line, line.tail, gi)\n assert diagram.connections.get_connection(line.tail).connected is gi\n assert len(comment.subject.annotatedElement) == 0", "def __init__(self, constraint: ConstraintExpr):\n self.constraint = constraint", "def __init__(__self__, *,\n from_port: pulumi.Input[int],\n to_port: pulumi.Input[int]):\n pulumi.set(__self__, \"from_port\", from_port)\n pulumi.set(__self__, \"to_port\", to_port)", "def _build_connection(self,\n node,\n source_port: PipelineNodeIO = None,\n target_port: PipelineNodeIO = None,\n filters: entities.Filters = None,\n action: str = None) -> PipelineConnection:\n if source_port is None and self.outputs:\n source_port = self.outputs[0]\n\n if target_port is None and node.inputs:\n target_port = node.inputs[0]\n\n if node.is_root():\n self._pipeline.set_start_node(self)\n\n source_connection = PipelineConnectionPort(node_id=self.node_id, port_id=source_port.port_id)\n target_connection = PipelineConnectionPort(node_id=node.node_id, port_id=target_port.port_id)\n if action is None and source_port.actions is not None and source_port.actions != []:\n action = source_port.actions[0]\n connection = PipelineConnection(source=source_connection, target=target_connection, filters=filters,\n action=action)\n return connection", "def createConstraint(self):\n return _libsbml.Model_createConstraint(self)", "def add_rule(self, ip_protocol, from_port, to_port,\r\n src_group_name, src_group_owner_id, cidr_ip):\r\n rule = IPPermissions(self)\r\n rule.ip_protocol = ip_protocol\r\n rule.from_port = from_port\r\n rule.to_port = to_port\r\n self.rules.append(rule)\r\n rule.add_grant(src_group_name, src_group_owner_id, cidr_ip)", "def test_commentline_glie_to_item_with_no_subject(create, diagram):\n line = create(CommentLineItem)\n gi = create(GeneralizationItem)\n\n assert allow(line, line.tail, gi)", "def ftp_PORT(self, line):\n # Parse PORT request for getting IP and PORT.\n # Request comes in as:\n # > h1,h2,h3,h4,p1,p2\n # ...where the client's IP address is h1.h2.h3.h4 and the TCP\n # port number is (p1 * 256) + p2.\n try:\n line = line.split(',')\n ip = \".\".join(line[:4]).replace(',','.')\n port = (int(line[4]) * 256) + int(line[5])\n except (ValueError, OverflowError):\n self.respond(\"501 Invalid PORT format.\")\n return\n\n # FTP bounce attacks protection: according to RFC-2577 it's\n # recommended to reject PORT if IP address specified in it\n # does not match client IP address.\n if not self.permit_foreign_addresses:\n if ip != self.remote_ip:\n self.log(\"Rejected data connection to foreign address %s:%s.\"\n %(ip, port))\n self.respond(\"501 Can't connect to a foreign address.\")\n return\n\n # ...another RFC-2577 recommendation is rejecting connections\n # to privileged ports (< 1024) for security reasons.\n if not self.permit_privileged_ports:\n if port < 1024:\n self.log('PORT against the privileged port \"%s\" refused.' %port)\n self.respond(\"501 Can't connect over a privileged port.\")\n return\n\n # close existent DTP-server instance, if any.\n if self.data_server:\n self.data_server.close()\n self.data_server = None\n\n if self.data_channel:\n self.data_channel.close()\n self.data_channel = None\n\n # make sure we are not hitting the max connections limit\n if self.ftpd_instance.max_cons:\n if len(self._map) >= self.ftpd_instance.max_cons:\n msg = \"Too many connections. 
Can't open data channel.\"\n self.respond(\"425 %s\" %msg)\n self.log(msg)\n return\n\n # open DTP channel\n self.active_dtp(ip, port, self)", "def test_port_create_on_multiconnected_host(self):\n network, segments, subnets = self._create_test_segments_with_subnets(2)\n\n # This host is bound to multiple hosts\n self._setup_host_mappings([(segments[0]['segment']['id'], 'fakehost'),\n (segments[1]['segment']['id'], 'fakehost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n self.deserialize(self.fmt, response)\n\n # multi segments supported since Antelope.\n self.assertEqual(webob.exc.HTTPCreated.code, response.status_int)", "def define_edge(self):\n\n self.canvas_edge = Line(\n points=[\n self.canvas_nodes[0].pos[0] + self.nodesize[0] / 2,\n self.canvas_nodes[0].pos[1] + self.nodesize[1] / 2,\n self.canvas_nodes[1].pos[0] + self.nodesize[0] / 2,\n self.canvas_nodes[1].pos[1] + self.nodesize[1] / 2\n ],\n joint='round',\n cap='round',\n width=3\n )", "def test_glue_no_port_no_can_glue(self):\n\n class Tool(ConnectHandleTool):\n def __init__(self, *args):\n super(Tool, self).__init__(*args)\n self._calls = 0\n\n def can_glue(self, *args):\n self._calls += 1\n\n tool = Tool(self.view)\n # at 300, 50 there should be no item\n sink = tool.glue(self.line, self.head, (300, 50))\n assert sink is None\n self.assertEquals(0, tool._calls)", "def _add_connection(self, con):\n # get connectors by the above specified labels\n start = self.connector_by_label(con[0])\n end = self.connector_by_label(con[1])\n if start.parent_type == 'box' and end.parent_type == 'box':\n # make sure, that not two inputs or two outputs are connected\n if start.connector_type == end.connector_type:\n raise ConnectorError(f\"Connection {con} connects \"\n f\"input to input or output to output.\")\n # make sure, that inputs are always first\n # and outputs are always second\n elif (start.connector_type == 'output'\n or end.connector_type == 'input'):\n start, end = end, start\n # make sure, that a switch does not connect to itself\n elif start.parent_type == 'switch' and end.parent_type == 'switch':\n if start.switch == end.switch:\n raise ConnectorError(f\"Connection {con} connects \"\n f\"a switch to itself.\")\n\n # create connection\n connection = ArduinoSwitchControlConnection(start, end)\n\n # add connection to attributes\n self.connections.append(connection)", "def add_connection(\n self, port1: ryvencore.NodePort.NodeOutput, port2: ryvencore.NodePort.NodeInput\n ) -> ryvencore.Connection.DataConnection:\n ryven_connection = self.script.flow.connect_nodes(port1, port2)\n if not ryven_connection:\n return\n\n # Add connection in compas graph\n node1 = port1.node\n node2 = port2.node\n edge_key = (node1.GLOBAL_ID, node2.GLOBAL_ID)\n if not self.has_edge(*edge_key):\n self.add_edge(*edge_key, {\"connections\": []})\n connections = self.edge_attribute(edge_key, \"connections\")\n connections.append({\"port1\": self.get_port_info(port1), \"port2\": self.get_port_info(port2)})\n self.edge_attribute(edge_key, \"connections\", connections)\n\n return ryven_connection", "def make_connection(self):\n if self._created_connections[self._pattern_idx] >= self.max_connections_per_pattern:\n raise ConnectionError(\"Too many connections\")\n self._created_connections[self._pattern_idx] += 1\n conn = self.connection_class(**self.patterns[self._pattern_idx])\n 
conn._pattern_idx = self._pattern_idx\n return conn", "def _validate_port_can_commit(self, res_port, req_port,\n session=None):\n switchport_ids = [p[\"id\"] for p in res_port[\"switch:ports\"]]\n\n if not switchport_ids:\n msg = (\"Cannot attach, no switchports found\")\n raise exc.InvalidInput(error_message=msg)\n\n bound_port_ids = []\n if switchport_ids:\n # Fetch all existing networks we are attached to.\n portbindings = db.filter_switchport_bindings_by_switch_port_ids(\n switchport_ids, session=session)\n portbindings = list(portbindings)\n bound_port_ids = set([pb.port_id for pb in portbindings])\n\n # We can't attach to a non-trunked network if the port is already\n # attached to another network.\n if bound_port_ids and (res_port[\"trunked\"] is False):\n msg = (\"Cannot attach non-trunked network, port \"\n \"already bound to network(s) %s\" % (bound_port_ids))\n raise exc.InvalidInput(error_message=msg)\n\n for bound_port_id in bound_port_ids:\n # We can't attach a trunked network if we are already attached\n # to a non-trunked network.\n port_ext = db.get_port_ext(bound_port_id, session=session)\n if not port_ext.trunked:\n msg = (\"Already attached via non-trunked \"\n \"port %s\" % (bound_port_id))\n raise exc.InvalidInput(error_message=msg)", "def add_link_reservation(self, node_id, tp_ofid, value):\n\n try:\n # Look for the port\n port = None\n for edge in self.neighbors[node_id]:\n if edge[\"src_port\"] == tp_ofid:\n port = edge\n break\n\n # Stop if the port is down\n if port is None:\n # fname = sys._getframe().f_code.co_name\n # print(\"{}: Port {} not found. Exiting.\".format(fname, tp_ofid))\n return\n\n # Adjust the link reservation amount\n port[\"bps_reserved\"] += value\n except KeyError:\n pass", "def allocate_connection_on_interconnect(bandwidth=None, connectionName=None, ownerAccount=None, interconnectId=None, vlan=None):\n pass", "def generate_connection_e(self,N_e):\n raise NotImplementedError", "def add_constraint(self, constraint):\n self.constraints.append(constraint)", "def publish_constraint(self):\n links = []\n for x, kf in enumerate(self.keyframes[1:], 1):\n p1 = self.keyframes[x - 1].pose.x(), self.keyframes[x - 1].pose.y()\n p2 = self.keyframes[x].pose.x(), self.keyframes[x].pose.y()\n links.append((p1, p2, \"green\"))\n\n for k, _ in self.keyframes[x].constraints:\n p0 = self.keyframes[k].pose.x(), self.keyframes[k].pose.y()\n links.append((p0, p2, \"red\"))\n\n if links:\n link_msg = ros_constraints(links)\n link_msg.header.stamp = self.current_keyframe.time\n self.constraint_pub.publish(link_msg)", "def add_connector(self):\n \n no = len(self.connectors)\n state = {}\n state[\"s_pin\"] = no\n state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % no\n \n if len(self.connectors)>0:\n state = self.connectors[-1].get_state()\n state[\"s_pin\"] = no\n state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % (no)\n else:\n if self.mount == self.MOUNT_THT:\n state[\"p_shape\"] = Con.SHAPE_HOLE\n elif self.mount == self.MOUNT_SMD:\n state[\"p_shape\"] = Con.SHAPE_PAD\n \n c = Con(no)\n c.set_state(state) \n \n self.sch_layers[\"pins\"].add(c.s_svg)\n self.pcb_layers[\"copper1\"].add(c.p_svg)\n self.connectors.append(c)", "def connector(midpoint = (0, 0), width = 1, orientation = 0):\n D = Device(name = 'connector')\n D.add_port(name = 1, midpoint = [midpoint[0], midpoint[1]],\n width = width, orientation = orientation)\n D.add_port(name = 2, midpoint = [midpoint[0], midpoint[1]],\n width = width, orientation = orientation - 180)\n return D", "def 
create_link(self, node1, port1, node2, port2=None):\n if not port2:\n ports_in_use = set((node['adapter_number'], node['port_number']) for link in self.links() for node in link['nodes'] if node['node_id'] == node2['node_id'])\n available_ports = (port for port in node2['ports'] if (port['adapter_number'], port['port_number']) not in ports_in_use)\n next_available_port = next(available_ports)\n else:\n next_available_port = node2['ports'][port2]\n\n link_obj = {'nodes' : [{'adapter_number' : node1['ports'][port1]['adapter_number'],\n 'port_number' : node1['ports'][port1]['port_number'],\n 'label' : { 'text' : node1['ports'][port1]['name']},\n 'node_id' : node1['node_id']},\n {'adapter_number' : next_available_port['adapter_number'],\n 'port_number' : next_available_port['port_number'],\n 'label' : { 'text' : next_available_port['name']},\n 'node_id' : node2['node_id']}]}\n\n links_url = \"{}/links\".format(self.url)\n\n result = requests.post(links_url, auth=self.auth, data=json.dumps(link_obj))\n result.raise_for_status()\n #links.append(link_obj)", "def test_commentline_element_connect(create, diagram):\n comment = create(CommentItem, Comment)\n line = create(CommentLineItem)\n ac = create(ActorItem, UML.Actor)\n\n connect(line, line.head, comment)\n connect(line, line.tail, ac)\n assert diagram.connections.get_connection(line.tail).connected is ac\n assert len(comment.subject.annotatedElement) == 1\n assert ac.subject in comment.subject.annotatedElement", "def add_connection(self, ip, port, key):\n\n # Socket declaration\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind((ip, port))\n\n # Adding connection to the list\n self.connections[key] = sock", "def _check_connectionline(self):\n self.connection_first_device, \\\n self.connection_first_port \\\n = self._check_validconnectionoutput()\n if self._is_arrow(self.symbol):\n # Get next symbol\n self.symbol = self.scanner.get_symbol()\n self.connection_second_device, \\\n self.connection_second_port \\\n = self._check_validconnectioninput()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create connection if no previous errors\n connection_error = self._connection_maker(\n self.connection_first_device,\n self.connection_first_port,\n self.connection_second_device,\n self.connection_second_port)\n # Send the returned error ID for error reporting\n self._display_semantic_error(connection_error)\n # Run a while loop to check for possible multiple connections from\n # same output\n while (\n not self._is_semicolon(\n self.symbol)) and (\n not self._is_eof(\n self.symbol)):\n if self._is_comma(self.symbol):\n self.symbol = self.scanner.get_symbol()\n self.connection_second_device, \\\n self.connection_second_port \\\n = self._check_validconnectioninput()\n if len(\n self.semantic_errors_list) == 0 and len(\n self.syntax_errors_list) == 0:\n # Only create connection if no previous errors\n connection_error = self._connection_maker(\n self.connection_first_device,\n self.connection_first_port,\n self.connection_second_device,\n self.connection_second_port)\n # Send the returned error ID for error reporting\n self._display_semantic_error(connection_error)\n else:\n # No comma\n self._display_syntax_error(\"comma\")\n self._semicolon_skipper()\n self.symbol = self.scanner.get_symbol()\n elif self._is_semicolon(self.symbol):\n self.symbol = self.scanner.get_symbol()\n else:\n # No '->'\n self._display_syntax_error(\"arrow\")\n self._semicolon_skipper()\n self.symbol = 
self.scanner.get_symbol()\n return None", "def is_constraint(self, line):\n constraints = ['PRIMARY', 'KEY', 'UNIQUE', 'CONSTRAINT']\n for constraint in constraints:\n if line.startswith(constraint): return True\n return False", "def test_reconnect_another(self):\n line, head = self._get_line()\n self.tool.connect(line, head, (120, 50))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n item = cinfo.connected\n port = cinfo.port\n constraint = cinfo.constraint\n\n assert item == self.box1\n assert port == self.box1.ports()[0]\n assert item != self.box2\n\n # connect to box2, handle's connected item and connection data\n # should differ\n self.tool.connect(line, head, (120, 150))\n cinfo = self.canvas.get_connection(head)\n assert cinfo is not None\n self.assertEqual(self.box2, cinfo.connected)\n self.assertEqual(self.box2.ports()[0], cinfo.port)\n\n # old connection does not exist\n self.assertNotEqual(item, cinfo.connected)\n self.assertNotEqual(constraint, cinfo.constraint)", "def create_line(uniform = True, *args):\n axis = cmds.radioButtonGrp(widgets[\"lineAxisRBG\"], q=True, sl=True)\n length = cmds.floatFieldGrp(widgets[\"lineLenFFG\"], q=True, v1=True)\n density = cmds.floatFieldGrp(widgets[\"lineDenFFG\"], q=True, v1=True)\n\n numCvs = length * density\n if numCvs < 3.0: # curve needs 3 cvs (for 3 dg curve)\n numCvs = 3.0\n\n cvDist = length/numCvs\n\n # make a list of pt dist along some axis\n axisList = []\n for x in range(0,int(numCvs)+1):\n axisList.append(x)\n\n pts = []\n\n if axis == 1:\n for y in range(0, int(numCvs)+1):\n pt = [axisList[y]*cvDist, 0, 0]\n pts.append(pt)\n\n if axis == 2:\n for y in range(0, int(numCvs)+1):\n pt = [0, axisList[y]*cvDist, 0]\n pts.append(pt)\n\n if axis == 3:\n for y in range(0, int(numCvs)+1):\n pt = [0, 0, axisList[y]*cvDist]\n pts.append(pt)\t\t\t\n \n line = cmds.curve(name = \"line_01\", d=3, p=pts)\n shp = cmds.listRelatives(line, s=True)[0]\n cmds.rename(shp, \"{0}Shape\".format(line))\n if uniform:\n line = cmds.rebuildCurve(line, rebuildType = 0, spans = 0, keepRange = 0, replaceOriginal=True, end=1, keepControlPoints=0)[0]\n\n cmds.select(line, r=True)", "def add_node():\n\ttry:\n\t\tif(check_entries()):\n\t\t\tserver_entry = [frame.entries[0].get(), str(frame.entries[1].get()+\".\"+frame.entries[2].get()+\".\"+frame.entries[3].get()+\".\"+frame.entries[4].get())]\n\t\t\tclient_entry = [frame.entries[5].get(), str(frame.entries[6].get()+\".\"+frame.entries[7].get()+\".\"+frame.entries[8].get()+\".\"+frame.entries[9].get())]\n\t\t\tconnection_lifetime = frame.entries[10].get()\n\t\t\tnetwork.add_connection(server_entry, client_entry, connection_lifetime);\n\n\texcept ValueError as err:\n\t\tfeedback.config(text=err)", "def create_nodes_collinear_2D_constraint():\n return NodesCollinear2DConstraint()", "def add_link(self, src_node_id, dst_node_id, src_port, dst_port, capacity=10000000):\n\n # Get function name\n fname = sys._getframe().f_code.co_name\n \n # Do not add the link if the nodes are not in the topology\n if src_node_id not in self.nodes or dst_node_id not in self.nodes:\n #print(\"{}: Nodes {} and/or {} not found - can't add link\".\n # format(fname, src_node_id, dst_node_id),\n # file=sys.stderr)\n return\n\n # Do not add the link if the link already exists\n for i in range(0, len(self.neighbors[src_node_id])):\n neighbor_id = self.neighbors[src_node_id][i][\"dst_node_id\"]\n if (neighbor_id == dst_node_id and\n self.neighbors[src_node_id][i][\"src_port\"] == src_port and\n 
self.neighbors[src_node_id][i][\"dst_port\"] == dst_port):\n # print(\"{}: link already exists - exiting\".format(fname))\n return\n\n # Do not add the link if the link already exists\n for i in range(0, len(self.neighbors[dst_node_id])):\n neighbor_id = self.neighbors[dst_node_id][i][\"dst_node_id\"]\n if (neighbor_id == src_node_id and\n self.neighbors[dst_node_id][i][\"src_port\"] == dst_port and\n self.neighbors[dst_node_id][i][\"dst_port\"] == src_port):\n # print(\"{}: link already exists - exiting\".format(fname))\n return\n\n # Derive src/dst interfaces\n # Want to make a version of this function in the Topology class. Not\n # good architecture\n src_int = self.mgr.get_interface(src_node_id, src_port)\n dst_int = self.mgr.get_interface(dst_node_id, dst_port)\n \n # Destination entry in the topology\n src_entry = {\n \"src_node_id\": src_node_id,\n \"dst_node_id\": dst_node_id,\n \"src_port\": src_port,\n \"dst_port\": dst_port,\n \"src_int\": src_int,\n \"dst_int\": dst_int,\n \"bps_reserved\": 0,\n \"bps_current\": 0,\n \"bps_capacity\": capacity,\n \"cur_bytes_sent\": 0,\n \"cur_bytes_recvd\": 0,\n \"prev_bytes_sent\": 0,\n \"prev_bytes_recvd\": 0,\n \"utilization_pct\": 0.0\n }\n self.neighbors[src_node_id].append(src_entry)\n\n # Destination entry in the topology\n dst_entry = {\n \"src_node_id\": dst_node_id,\n \"dst_node_id\": src_node_id,\n \"src_port\": dst_port,\n \"dst_port\": src_port,\n \"src_int\": dst_int,\n \"dst_int\": src_int,\n \"bps_reserved\": 0,\n \"bps_current\": 0,\n \"bps_capacity\": capacity,\n \"cur_bytes_sent\": 0,\n \"cur_bytes_recvd\": 0,\n \"prev_bytes_sent\": 0,\n \"prev_bytes_recvd\": 0,\n \"utilization_pct\": 0.0\n }\n self.neighbors[dst_node_id].append(dst_entry)\n\n # Add the link to self.links if it does not already exist\n # if ((src_port, dst_port) not in self.links and\n # (dst_port, src_port) not in self.links):\n # self.links[(src_port, dst_port)] = src_index\n # self.links[(dst_port, dst_port)] = dst_index\n \n self.l += 1", "def get_connection(self, *args, **kwargs):\n conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)\n if conn.assert_hostname != self.assert_hostname:\n conn.assert_hostname = self.assert_hostname\n return conn", "def plt_connecting_lines():\n\n for i in range(0, Molecule.connection_count):\n tmp1 = Molecule.right_endpt[Molecule.left_connection[i] - 1]\n tmp2 = Molecule.left_endpt[Molecule.right_connection[i] - 1]\n tmp3 = Molecule.energy[Molecule.left_connection[i] - 1]\n tmp4 = Molecule.energy[Molecule.right_connection[i] - 1]\n\n plt.plot([tmp1, tmp2], [tmp3, tmp4], color=PlotParameter.connection_line_color,\n lw=PlotParameter.connection_line_width, linestyle='--')\n\n return None", "def add_connection_beginning(self, route, potential_solution):\n\n current_station = route.stations[0]\n\n # pick a random new station out of all connections of the current station\n new_station = random.choice(list(current_station.connections.keys()))\n\n # find the connection between the two stations \n link = current_station.connections[new_station] \n\n # find the time of the connection \n time = link.time \n \n\n # only accept the change if it wouldn't exceed the maximum time\n if time + route.total_time <= self.max_minutes:\n \n # insert the connection to the front of the connection list\n route.insert_connection(link, time, 0)\n\n # insert the station to the front of the station list\n route.insert_station(new_station, 0)", "def constraints(self):\n ...", "def setup_logical_port_connectivity(self, context, port_db):\n 
pass", "def test_port_init_singelton(self):\n v1 = 23.0\n v2 = 29.0\n p1 = cn.Port()\n p1.value = v1\n # check setting of value\n p2 = cn.Port(v1)\n # check fixing\n p3 = cn.Port(\n value=v1,\n fixed=True\n )\n p4 = cn.Port(\n value=v1,\n fixed=False\n )\n # check linking\n p5 = cn.Port(v2)\n p5.link = p4\n\n self.assertEqual(\n np.allclose(\n p1.value, p2.value\n ),\n True\n )\n self.assertEqual(p3.fixed, True)\n self.assertEqual(p4.fixed, False)\n self.assertEqual(\n np.allclose(\n p5.value,\n p4.value\n ),\n True\n )\n\n # check bounds\n fixed = False\n is_output = False\n is_reactive = False\n is_bounded = True\n lower_bound = 2\n upper_bound = 5\n value = 0\n p6 = cn.Port(\n value=value,\n fixed=fixed,\n is_output=is_output,\n is_reactive=is_reactive,\n is_bounded=is_bounded,\n lb=lower_bound,\n ub=upper_bound\n )\n self.assertEqual(\n np.all(p6.value <= upper_bound),\n True\n )\n self.assertEqual(\n np.all(p6.value >= lower_bound),\n True\n )\n self.assertAlmostEqual(\n p6.value[0],\n lower_bound # the lower bound is not part of the\n )", "def addPortOnSide(self, node: LNode, portSide: PortSide) -> LPort:\n side = node.getPortSideView(portSide)\n port = LPort(node, None, side=portSide, name=\"port%d\" % len(side))\n side.append(port)\n\n if not node.portConstraints.isSideFixed():\n node.portConstraints = PortConstraints.FIXED_SIDE\n\n return port", "def setup_rule(self, client, *args, **keyword_args):\n pass", "def setup_rule(self, client, *args, **keyword_args):\n pass", "def create_outbound(self, addr, use_new_connection=False):", "def _check_binding(self, value: t.Any) -> t.Any:\n client = getattr(value, \"CLIENT\", value)\n if isinstance(client, self.__class__) and client is not self:\n err = f\"{value} is already set to {client!r} and cannot be set to {self!r}\"\n raise ConnectError(err)\n value.CLIENT = self\n return value", "def connect(self, address: Tuple[str, int]) -> None:\n ...", "def item_create(\n item, item_id, item_type, create=\"create\", extra_args=None, cibfile=None\n):\n cmd = [\"pcs\"]\n if isinstance(cibfile, str):\n cmd += [\"-f\", cibfile]\n\n if isinstance(item, str):\n cmd += [item]\n elif isinstance(item, (list, tuple)):\n cmd += item\n\n # constraint command follows a different order\n if item in [\"constraint\"]:\n if isinstance(item_type, str):\n cmd += [item_type]\n\n if isinstance(create, str):\n cmd += [create]\n elif isinstance(create, (list, tuple)):\n cmd += create\n\n # constraint command needs item_id in format 'id=<id' after all params\n # constraint command follows a different order\n if item not in [\"constraint\"]:\n cmd += [item_id]\n if isinstance(item_type, str):\n cmd += [item_type]\n\n if isinstance(extra_args, (list, tuple)):\n # constraint command needs item_id in format 'id=<id' after all params\n if item in [\"constraint\"]:\n extra_args = extra_args + [\"id={}\".format(item_id)]\n cmd += extra_args\n\n return __salt__[\"cmd.run_all\"](cmd, output_loglevel=\"trace\", python_shell=False)", "def __init__(self, source_node, source_gate_name, target_node, target_slot_name, weight=1):\n self.link(source_node, source_gate_name, target_node, target_slot_name, weight)", "def serial_connection(self, value):\n if len(value) > 4:\n raise ValueUnsupportedError(\n 'serial port connection list', value,\n 'no more than 4 connections (ESXi limitation)')\n COTDeploy.serial_connection.fset(self, value)", "def __init__(self, link, lineproto, interface_name):\n self.link = link\n self.lineproto = lineproto\n self.interface_name = interface_name", "def 
__init__(self, ip, port, header):\n \n self.header = header\n self.ip = ip\n self.port = port\n try:\n self._connect_socket()\n except socket.error as e:\n print(e)\n self.close_and_exit()", "def __init__(self, ip, port, automatic = True, control_buf_size = 32, data_buf_size = 128, \\\n m_to = 0.01, socket_to = 0.005):\n self.conn = Connector(ip, port, control_buf_size, data_buf_size, socket_to)\n self.conn.connect()\n self.m_to = m_to\n self.status = Modem.Status.IDLE\n self.node_status = 0\n self.automatic = automatic\n self.interpreter = Interpreter()\n self.mainPID = os.getpid()\n self.error_status = Modem.ErrorDict.NONE\n self.commands_queue = \"\".split(Interpreter.END_COMMAND)\n if automatic:\n thread.start_new_thread(self.run,())", "def connectCurrentConnection(self, position):\n\n # get the item at the position\n itemAt = self.itemAt(position.toPoint(), self.getView().transform())\n\n # remove the connection (a new connection will get added if there is a valid connector)\n self.removeItem(self.currentlyConnecting)\n connection = self.currentlyConnecting\n self.currentlyConnecting = None\n\n\n \"\"\" if itemAt is a Connector (Top/Bottom) item (if you pull onto a Blob) \"\"\"\n if itemAt is not None and isinstance(itemAt, ConnectorItem) and not self.disabled:\n # check, whether the connection is already connected to connector of the given type (top/bottom)\n if connection.checkSameConnectorTypeRestriction(itemAt):\n # get the connectors\n if itemAt.isTopConnector():\n topConnector = itemAt\n bottomConnector = connection.getConnectorIfNotFullyConnected()\n else:\n topConnector = connection.getConnectorIfNotFullyConnected()\n bottomConnector = itemAt\n\n # get data needed to notify the underling data structure\n topLayerID = topConnector.getNodeItem().getLayerID()\n bottomLayerID = bottomConnector.getNodeItem().getLayerID()\n topBlobIndex = topConnector.getIndex()\n bottomBlobIndex = bottomConnector.getIndex()\n\n # notify to change the data\n self.__nodeEditor.tryToConnect(topLayerID, topBlobIndex, bottomLayerID, bottomBlobIndex)\n\n \"\"\" if itemAt is a Node Item (if you pull onto a layer) \"\"\"\n if itemAt is not None and isinstance(itemAt, NodeItem) and not self.disabled:\n # test if connector starts at a top Blob\n if connection.getConnectorIfNotFullyConnected().isTopConnector():\n\n # bottomNode is itemAt\n bottomNode = itemAt\n\n # get layer IDs\n topLayerID = connection.getConnectorIfNotFullyConnected().getNodeItem().getLayerID()\n bottomLayerID = bottomNode.getLayerID()\n topBlobIndex = connection.getConnectorIfNotFullyConnected().getIndex()\n\n # get the Index of the new Blob, should it be necessary to create one\n # (determined in the following for loop)\n bottomBlobIndex = bottomNode.getBottomConnectorCount()\n\n # current connection top name and phase\n topBlobName = connection.getConnectorIfNotFullyConnected().getBlobName()\n topBlobPhase = connection.getConnectorIfNotFullyConnected().getPhase()\n\n # check if there is a connected Node that has a different phase than the currently\n # connecting Node, but has a connection with the same top Blob Name\n topBlobFound = False\n for bottomBlob in bottomNode.getBottomConnectors():\n if len(bottomBlob.getConnectedNodes()) > 0:\n for topNode in bottomBlob.getConnectedNodes():\n for topBlob in topNode.getTopConnectors():\n if topBlob.getBlobName() == topBlobName and topBlob.getPhase() != topBlobPhase:\n bottomBlobIndex = bottomBlob.getIndex()\n topBlobFound = True\n break\n\n # otherwise (if no corresponding top Blob was 
found)\n # get Index of first empty bottom blob (if available)\n counter = -1\n emptyBlobAvailable = False\n if not topBlobFound:\n for blob in bottomNode.getBottomConnectors():\n counter += 1\n if len(blob.getConnectedNodes()) == 0:\n bottomBlobIndex = counter\n emptyBlobAvailable = True\n break\n\n # add empty bottom blob property\n if not emptyBlobAvailable and not topBlobFound:\n self.__nodeEditor.tryToAddBottomBlob(bottomLayerID, \"\")\n\n # connect nodes\n connected = self.__nodeEditor.tryToConnect(topLayerID, topBlobIndex, bottomLayerID, bottomBlobIndex)\n\n # if the connection did not work but a new blob was created, remove it\n if not connected and not emptyBlobAvailable and not topBlobFound:\n bottomNode.removeBottomConnector(bottomBlobIndex)", "def add_constraint(self, constraint):\n self._ckey += 1\n self.constraints[self._ckey] = constraint", "def drawHollowConstraints(fname, linewidth=2, rgb=\"(1,1,1)\"):\n constraints = read_parameters(fname)\n if not constraints.type in (\"box\", \"brick\"):\n raise NotImplementedError(\"sorry, constraint %(type)r not implemented\" % vars(constraints))\n c = constraints\n return drawBrick(c.res_num1, c.atom1, c.res_num2, c.atom2,\n chain1=c.chain1, chain2=c.chain2,\n offset1=c.offset1, offset2=c.offset2,\n boxname=c.type,\n linewidth=linewidth, rgb=rgb)", "def removeConnection(self, connectionItem):\n assert isinstance(connectionItem, ConnectionItem)\n assert connectionItem.startPortName() is not None\n assert connectionItem.endPortName() is not None\n\n self.removeConnectionByPortNames(connectionItem.startPortName(),\n connectionItem.endPortName())", "def _create_connection_element(self, var1, var2):\n conn, swap = self._find_connection_element(var1, var2)\n if conn:\n if swap:\n var1, var2 = var2, var1\n else:\n conn = var1.xml_create_element(u'connection', NSS[u'cml'])\n mapc = var1.xml_create_element(u'map_components', NSS[u'cml'],\n attributes={u'component_1': var1.component.name,\n u'component_2': var2.component.name})\n conn.xml_append(mapc)\n self.model.xml_append(conn)\n mapv = var1.xml_create_element(u'map_variables', NSS[u'cml'],\n attributes={u'variable_1': var1.name,\n u'variable_2': var2.name})\n conn.xml_append(mapv)", "def validate_conn(self, solution):\r\n\r\n active_nodes = [idx for idx, value in enumerate(solution) # remove not included nodes in solution\r\n if value != 0 and idx not in self.dead_nodes and self.network.get_node(idx).energy >= cf.COMMUNICATION_ENERGY]\r\n active_nodes.append(-1) # add a sink node \r\n visited = self.DFS(self.network_graph, active_nodes[0], active_nodes)\r\n if len(visited) == len(active_nodes):\r\n return True\r\n else:\r\n return False", "def rcv_addr(self, node_name, **kwargs):\n # Get keyword args\n auto_create = kwargs.get(\"auto_create\", True)\n auto_delete = kwargs.get(\"auto_delete\", False)\n link_name = kwargs.get(\"link_name\")\n durable = kwargs.get(\"durable\", False)\n browse = kwargs.get(\"browse\", False)\n exclusive = kwargs.get(\"exclusive\", False)\n binding_list = kwargs.get(\"binding_list\", [])\n ftd_count = kwargs.get(\"ftd_count\")\n ftd_size = kwargs.get(\"ftd_size\")\n policy = kwargs.get(\"policy\", \"flow-to-disk\")\n\n create_policy = None\n if auto_create:\n create_policy = \"always\"\n delete_policy = None\n if auto_delete:\n delete_policy = \"always\"\n mode = None\n if browse:\n mode = \"browse\"\n x_declare_list = [\"\\\"exclusive\\\": %s\" % exclusive]\n if ftd_count != None or ftd_size != None:\n queue_policy = [\"\\'qpid.policy_type\\': %s\" % policy]\n 
if ftd_count:\n queue_policy.append(\"\\'qpid.max_count\\': %d\" % ftd_count)\n if ftd_size:\n queue_policy.append(\"\\'qpid.max_size\\': %d\" % ftd_size)\n x_declare_list.append(\"arguments: %s\" % self._fmt_map(queue_policy))\n x_bindings_list = []\n for binding in binding_list:\n x_bindings_list.append(\"{exchange: %s, key: %s}\" % binding)\n if durable: reliability = 'at-least-once'\n else: reliability = None\n return self.addr_fmt(node_name, create_policy=create_policy, delete_policy=delete_policy, mode=mode, link=True,\n link_name=link_name, durable=durable, x_declare_list=x_declare_list,\n x_bindings_list=x_bindings_list, link_reliability=reliability)", "def test_connection(self):\n gen = self.create(GeneralizationItem)\n c1 = self.create(ClassItem, UML.Class)\n c2 = self.create(ClassItem, UML.Class)\n\n self.connect(gen, gen.tail, c1)\n assert self.get_connected(gen.tail) is c1\n\n self.connect(gen, gen.head, c2)\n assert gen.subject is not None\n assert gen.subject.general is c2.subject\n assert gen.subject.specific is c1.subject" ]
[ "0.5818294", "0.5687332", "0.5500008", "0.52568966", "0.5220818", "0.5207515", "0.52057487", "0.505119", "0.5035412", "0.5020907", "0.49766943", "0.4966835", "0.49285123", "0.492827", "0.492827", "0.49215764", "0.49072984", "0.49011382", "0.4897819", "0.48977235", "0.4889161", "0.4876178", "0.4873932", "0.48451567", "0.48292992", "0.48227188", "0.47957954", "0.47898212", "0.47815078", "0.4751471", "0.4746205", "0.474032", "0.474032", "0.47365743", "0.47233927", "0.47203544", "0.471862", "0.4715587", "0.47126704", "0.47111252", "0.468648", "0.46840006", "0.46773657", "0.4669509", "0.4651717", "0.46445978", "0.46434456", "0.46392262", "0.46280602", "0.46247777", "0.46145654", "0.4612712", "0.45941883", "0.45859197", "0.45857245", "0.4585033", "0.45808288", "0.45802006", "0.45787632", "0.4570262", "0.45510253", "0.4546734", "0.45457652", "0.4534617", "0.45344183", "0.45342407", "0.45341218", "0.45339024", "0.45333508", "0.45322156", "0.45227775", "0.4521536", "0.4518277", "0.45113742", "0.45056388", "0.44979978", "0.44920564", "0.44907925", "0.44855762", "0.44813353", "0.44770795", "0.44743526", "0.44743526", "0.44734544", "0.4470202", "0.44700396", "0.44622612", "0.44390792", "0.4438595", "0.44341296", "0.44333005", "0.44264913", "0.4422834", "0.44226348", "0.44122314", "0.4410221", "0.4405696", "0.44054008", "0.43961388", "0.43886468" ]
0.6178213
0
Set lifeline's lifetime length.
def _set_length(self, length):
    self.bottom.pos.y = self.top.pos.y + length
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_tail_length(self, value):\n self.layer.tail_length = value", "def setLength(self, new_length):\n\n self.length = new_length", "def set_lifetime(self, lifetime):\n self.__lifetime = lifetime", "def length(self, length):\n\n self._length = length", "def set_last_segment_length(self, length):\n prior_length = self.segments[-1].get_length()\n if prior_length != -1:\n self.end_time -= prior_length\n\n self.segments[-1].set_length(length)\n self.end_time += length", "def set_line_width(self, val):\n self.lwidth = val", "def set_length(self, new_length):\n if(new_length == None):\n self._logger.write(\"Error! new_length cannot be a NoneType\")\n elif(type(new_length) != float):\n self._logger.write(\"Error! new_length must be of type float\")\n else:\n try:\n self._length = new_length\n except Exception as e:\n self._logger.write(\"Error! Could not set the new length:\\n %s\" % e)", "def set_length(self, ak_tpl: BKT, newLength: float): # -> None:\n ...", "def _set_packet_len(self, packet_len):\n self._packet_len = packet_len", "def set_length(self, length):\n if length < 0:\n raise AttributeError('length should be positive')\n self.progress_char_length = length", "def token_length(self, token_length):\n\n self._token_length = token_length", "def setLength(self, length):\n self.vector.norm = length", "def length(self, length: Union[int, float]):\n self._length = length\n self._update_length()\n self.events.length()\n\n self.refresh()", "def set_part_length(self, seconds):\n self._part_length = seconds", "def setGoalLength(self, length):\n assert isinstance(length, int)\n self.goal_length = length", "async def gpt2_set_length(self, ctx, *, arg=None):\n print('Command gpt2_set_length triggered')\n if arg:\n try:\n i = int(arg)\n assert (i > 0) and (i < 1024)\n except ValueError or AssertionError:\n ctx.send(\"ERROR: Argument must be a positive integer number\")\n self.update_config(length=arg)\n else:\n await ctx.send(\"ERROR: Argument required\")", "def _on_tail_length_change(self, event=None):\n with self.layer.events.tail_length.blocker():\n value = self.layer.tail_length\n value = np.clip(value, 1, MAX_TAIL_LENGTH)\n self.tail_length_slider.setValue(value)", "def _set_length(self):\n if self.nb_points <= 1:\n self.length = 0\n else:\n ldiff_degree = self.coord_list[1:] - self.coord_list[:-1]\n ldiff_meter = ldiff_degree * np.pi * EQUATORIAL_EARTH_RADIUS / 180\n ldiff_meter[:, 0] *= np.cos(self.mean_pos[1] * np.pi / 180)\n self.length = np.sum(\n np.sqrt(ldiff_meter[:, 0] ** 2 + ldiff_meter[:, 1] ** 2)\n )", "def extendSequenceLength(self, timeLength):\n timeLength = self.secToStep(timeLength)\n self._addNewSwitch(timeLength,0,0)", "def set_lives(self, new_number_of_lives):\n self.__lives = new_number_of_lives", "def change_length(self, value):\n self.layer.length = value\n self.lengthSpinBox.clearFocus()\n self.setFocus()", "def set_max_lines(self, n):\n\t\tself._maxLines = n\n\t\tself._trunc_lines()", "def __init__(self, lineLen):\n if lineLen < 10:\n raise ValueError('lineLen cannot be less than 10')\n self.len = lineLen\n self.currentSpace = lineLen\n self.beginningOfLine = True", "def setTickLength(major=24,minor=16):\n dislin.ticlen(major,minor)", "def set_base_length_entry(self, base_length):\n self.entries[\"ent_base_length\"].delete(0, END)\n self.entries[\"ent_base_length\"].insert(\n 0, str(base_length))", "def set_body_size(self, length: int) -> None:\n self._body = [Coord2D(0, 0) for _ in range(length)]\n self._tail_visited = set()\n self.record_tail_location()", "def 
_on_len_change(self, event=None):\n with self.layer.events.length.blocker():\n self.lengthSpinBox.setValue(self.layer.length)", "def fl_set_object_lsize(ptr_flobject, size):\n _fl_set_object_lsize = library.cfuncproto(\n library.load_so_libforms(), \"fl_set_object_lsize\", \\\n None, [cty.POINTER(xfdata.FL_OBJECT), cty.c_int], \\\n \"\"\"void fl_set_object_lsize(FL_OBJECT * ob, int lsize) \"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_size = library.convert_to_intc(size)\n library.keep_elem_refs(ptr_flobject, size, i_size)\n _fl_set_object_lsize(ptr_flobject, i_size)", "def grr_set_flow_timeout(line: Text) -> None:\n args = grr_set_flow_timeout.parser.parse_args(shlex.split(line))\n magics_impl.grr_set_flow_timeout_impl(args.timeout)", "def setLSLimits(*args):\n args[0].Limit.LSLimit.ls_limit = args[1]", "def setMaxWindowLen(self, length):\n return self._set(maxWindowLen=length)", "def setMaxWindowLen(self, length):\n return self._set(maxWindowLen=length)", "def setLives(self,life):\n self._lives = life", "def __padlen(self,l):\n return Utils.padlen(l,self.com.granularity)", "def setSplitLength(self, value):\n return self._set(splitLength=value)", "def set_line_end(self, line_nr):\n self._line_end = line_nr", "def change_log_length(self, log_length):\n len_diff = abs(self.log_length - log_length)\n if log_length > self.log_length:\n for log_group in self.log_names.values():\n for log_array in log_group:\n tmparr = numpy.full(log_length, self.log_arrays[log_array][0]) # generate tmparr with first value from array\n tmparr[-self.log_arrays[log_array].size:] = self.log_arrays[log_array] # fill end with current array\n self.log_arrays[log_array] = tmparr\n tmparr = numpy.zeros(log_length)\n tmparr[:len_diff] = numpy.linspace(self.log_time[0] - len_diff/self.frequency,\n self.log_time[0], len_diff)\n tmparr[-self.log_time.size:] = self.log_time\n self.log_time = tmparr\n else:\n for log_group in self.log_names.values():\n for log_array in log_group:\n tmparr = numpy.zeros(log_length)\n tmparr[:] = self.log_arrays[log_array][-log_length:]\n self.log_arrays[log_array] = tmparr\n tmparr = numpy.zeros(log_length)\n tmparr[:] = self.log_time[-log_length:]\n self.log_time = tmparr\n self.log_length = log_length", "def Resize(self):\n\n self.history_length = int( round( self.owner['time_span']/self.owner['sample_speed']))\n self.FreshStart()", "def set_trailing_sl(self, n_atr: float = 6):\n self.__n_atr = n_atr", "def setNewLen(self):\n self.wordLen = randint(3, 31)", "def reference_nusselt_number_length(self, reference_nusselt_number_length):\n\n self._reference_nusselt_number_length = reference_nusselt_number_length", "def height_lt(self, height_lt):\n\n self._height_lt = height_lt", "def setPacketLength(self):\n self.packetLength = len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def setLong(self, name: unicode, value: long) -> None:\n ...", "def _setVals(self, cmd_length=0):\n self.cmd_length = cmd_length", "def set_life(self, value):\n self._life = value", "def set_life(self):\n self.life -= 1", "def setLives(self, lives):\n assert type(lives) == int\n self._lives = lives", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def setMaxLength(self, value):\n return self._set(maxLength=value)", "def llen(self, name):\n self.connect()\n self._write('LLEN %s\\r\\n' % name)\n return 
self._get_numeric_response()", "def initializeLegend(nlines, maxlen):\n dislin.legini(' ',nlines, maxlen)", "def length(self, value):\n raise TypeError(\"Cannot delete {class-name} length property.\")", "def lmp(self, lmp):\n\n self.logger.debug(\"In 'lmp' setter.\")\n\n self._lmp = lmp", "def __init__(self, length):\r\n self.length = length\r\n self.start_time = int(time.time())", "def set_length(vec, length):\n return normalized(vec) * length", "def setLCLimits(*args):\n args[0].Limit.LCLimit.lc_limit = args[1]", "def random_password_length(self, random_password_length):\n\n self._random_password_length = random_password_length", "def line_length(self, dLine = 0):\n return self.buffer.line_length(self.line + dLine)", "def reset_line_count(self):\n self._line_count = 0", "def extend(self, L):\n self[len(self):] = L", "def length_changed(self, value):\n self.message.dlc = value\n self.validate_data_input(value)", "def set_end(self, end_line):\n self.__end_line = end_line", "def set_alive(self, a, line_number=0):\n self.alive = a\n self._alive_line = line_number", "def len23(self, len): # -> None:\n ...", "def sequence_length(self, new_seq_length):\n self._sequence_length = new_seq_length\n self.tensors = [torch.tensor(\n generate_batches(x, self._sequence_length, self._sequence_stride)).float()\n for x in [self.inputs, self.outputs]]", "def set_width(self, *args):\n return _ida_hexrays.lvar_t_set_width(self, *args)", "def set_length(self, ak_spec: Union[str, BKT], val: float) -> None:\n ...", "def as_length(self, value):\n new_vec = self.copy()\n new_vec.length = value\n return new_vec", "def length(self):\n ...", "def grr_set_default_flow_timeout(line: Text) -> None:\n del line # Unused.\n magics_impl.grr_set_default_flow_timeout_impl()", "def set_size(self, new_size: int):\n self.__tab_size = new_size\n self.__check_interpreter()\n self.__vals = [0 for _ in range(self.__tab_size)]", "def length(self):\n pass", "def set_endline(self, line_no):\n self.set_attribute(\"endline\", line_no)", "def set_linked_lmax(\n self,\n val=None\n ):\n if val != None:\n self.linked_lmax = val", "def set_lives(self, lives):\n self._lives = lives", "def setLong(self, addr: ghidra.program.model.address.Address, value: long) -> None:\n ...", "def adjust_detector_length(requested_detector_length,\n requested_distance_to_tls,\n lane_length):\n\n if requested_detector_length == -1:\n return lane_length - requested_distance_to_tls\n\n return min(lane_length - requested_distance_to_tls,\n requested_detector_length)", "def set_auto_throats_length(self):\n\n for n1, n2 in self.graph.edges:\n self.graph[n1][n2]['length'] = self._compute_auto_throat_length(\n n1, n2)", "def _update_end_lineno():\n if origin:\n record.origin.line_end = lineno", "def __init__(self, length):\n super().__init__([length] * 3)", "def lifetime(self) -> str:\n return pulumi.get(self, \"lifetime\")", "def set_max_log_lines(self, value):\n\t\tself.spinMaxLogLines.set_value(value)", "def lun_count(self, lun_count):\n\n self._lun_count = lun_count", "def _fixHeaderLength(self):\n self.header.seek(0)\n lines = self.header.readlines()\n headlength = len(lines)\n lines[0] = wrapLine(\"NLHEAD_FFI\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (headlength, self.delimiter, self.FFI))\n self.header = StringIO(\"\".join(lines))\n self.header.seek(0)", "def liver(self, liver):\n\n self.logger.debug(\"In 'liver' setter.\")\n\n self._liver = liver", "def change_tail_width(self, value):\n self.layer.tail_width = float(value) / 2.0", "def 
set_max_sentence_length(self):\n new_max = int(self.set_max_sentence.get())\n cur_min = self.min_sentence_length\n\n if new_max > cur_min:\n self.max_sentence_length = new_max\n else:\n old_max = self.max_sentence_length\n old_max_var = tk.StringVar(self.master)\n old_max_var.set(str(old_max))\n self.set_max_sentence.config(textvariable=old_max_var)", "def __init__(self, *args, **kwargs):\n super(LinlLis, self).__init__(\n ('linl', Bits(maxlen=4)),\n ('lis', Bits(maxlen=4)),\n *args, **kwargs\n )", "def length_minutes(self, length_minutes):\n \n self._length_minutes = length_minutes", "def setLong(self, address: ghidra.program.model.address.Address, value: long) -> None:\n ...", "def set_timeout(self, new_timeout):\n self.timeout = new_timeout\n self._update_timestamp()", "def lineLengthExceeded(self, line):\n segments = self._breakLineIntoSegments(line)\n for segment in segments:\n self.lineReceived(segment)", "def __init__(self, keep_last_n_lines=5) :\r\n self.contextLines_ = keep_last_n_lines\r\n self.data_ = CircularBuffer(1024)\r\n self.lineNumber_ = 1\r\n self.charNumber_ = 0", "def len_literal(self):\n if hasattr(self, '_m_len_literal'):\n return self._m_len_literal if hasattr(self, '_m_len_literal') else None\n\n self._m_len_literal = (self.len_literal_div2 * 2)\n return self._m_len_literal if hasattr(self, '_m_len_literal') else None", "def set_size(self, length, width=None):\n\n length = float(length)\n try:\n width = float(width)\n except:\n pass\n if width is not None:\n self.ang_size = np.sqrt(length * width)\n else:\n self.ang_size = length\n\n ang_size_in_rad = self.ang_size / 60 * np.pi / 180\n self.sr = ct.angle_to_solid_angle(ang_size_in_rad)", "def __init__(self) -> None:\n self.length = 0", "def setMaxSentenceLength(self, value):\n return self._set(maxSentenceLength=value)" ]
[ "0.6628996", "0.6548008", "0.6527316", "0.6292543", "0.61618847", "0.60966504", "0.605155", "0.59509474", "0.59412754", "0.59316313", "0.59247273", "0.5702406", "0.56750685", "0.5643853", "0.5622231", "0.55569696", "0.55228", "0.5480967", "0.54594636", "0.5426649", "0.5416591", "0.5415061", "0.5395622", "0.5348917", "0.5322101", "0.5316463", "0.53009427", "0.5281208", "0.5280256", "0.52669144", "0.5234339", "0.5234339", "0.5201205", "0.5148295", "0.5084994", "0.50651485", "0.5059319", "0.50544727", "0.50509447", "0.50501007", "0.50469553", "0.5044129", "0.5040274", "0.50318587", "0.5029048", "0.5024301", "0.501612", "0.49987596", "0.4997782", "0.4997782", "0.4997782", "0.4997782", "0.49809036", "0.49637124", "0.49553275", "0.4951865", "0.4947657", "0.49470782", "0.49403775", "0.49171615", "0.49137568", "0.4910371", "0.4899165", "0.48912367", "0.4879797", "0.48691186", "0.48671076", "0.48600674", "0.48416224", "0.4830541", "0.482549", "0.4821683", "0.48200938", "0.48129222", "0.48065355", "0.4804439", "0.47883922", "0.4786316", "0.4780585", "0.47727942", "0.47692135", "0.47626528", "0.47599858", "0.47591552", "0.47506195", "0.47467607", "0.4734122", "0.47341177", "0.47317293", "0.4723978", "0.47239318", "0.47206935", "0.47068396", "0.4702105", "0.47020692", "0.4699961", "0.46933934", "0.46830922", "0.4680476", "0.46767548" ]
0.5764367
11
Draw lifeline. We always draw the lifeline's head. We only draw the lifeline's lifetime when the lifetime is visible.
def draw_lifeline(self, box, context, bounding_box):
    cr = context.cairo
    cr.rectangle(0, 0, self.width, self.height)
    stroke(context)
    if (
        context.hovered
        or context.focused
        or context.dropzone
        or self._lifetime.visible
    ):
        bottom = self._lifetime.bottom
        cr = context.cairo
        with cairo_state(cr):
            cr.set_dash((7.0, 5.0), 0)
            x = self._handles[SW].pos.x
            top = self._lifetime.top
            cr.move_to(top.pos.x - x, top.pos.y)
            cr.line_to(bottom.pos.x - x, bottom.pos.y)
            stroke(context, dash=False)

        # draw destruction event
        if self.is_destroyed:
            d1 = 8
            d2 = d1 * 2
            cr.move_to(bottom.pos.x - d1, bottom.pos.y - d2)
            cr.line_to(bottom.pos.x + d1, bottom.pos.y)
            cr.move_to(bottom.pos.x - d1, bottom.pos.y)
            cr.line_to(bottom.pos.x + d1, bottom.pos.y - d2)
            cr.stroke()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_line():\n\n # Small Size Line\n glLineWidth(0.1)\n glColor3f(0.5, 1.0, 0.9)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n length += 10\n wid += 50\n # Medium Size Line\n glLineWidth(2.0)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n length += 50\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n wid += 50\n # Main Line\n # ordinat\n glLineWidth(1.5)\n glColor3f(0.5, 0.4, 0.8)\n glBegin(GL_LINES)\n glVertex3f(height / 2, 0, 0.0)\n glVertex3f(height / 2, width, 0)\n glEnd()\n # absis\n glBegin(GL_LINES)\n glVertex3f(0, width / 2, 0.0)\n glVertex3f(height, width / 2, 0)\n glEnd()", "def draw_reticle(self):\n glColor3d(0, 0, 0)\n self.reticle.draw(GL_LINES)", "def __draw_line(display, color, ball_pos, dx, dy):\n pygame.draw.line(display, color, ball_pos, (ball_pos[0] + dx, ball_pos[1] + dy), 2)", "def display_line_map(self):\n lh_count = len(flatten(self.lh_data))\n print('{} horizontal line mapping: {} hline draw calls. {} bytes'.format(\n self.char,\n lh_count,\n len(list(self._stream_lhmap()))\n ))\n print('v' * len(''.join([str(i) for i in range(self.width)])), ' y [(x, length)]')\n for y in range(self.height):\n for x in range(self.width):\n space = ' ' if x < 10 else ' '\n char = space if self.pixels[y * self.width + x] else x\n print(char, end='')\n print(' ', '%2d' % y, self.lh_data[y])\n print()\n\n lv_count = len(flatten(self.lv_data))\n print('{} vertical line mapping: {} vline draw calls. 
{} bytes'.format(\n self.char,\n lv_count,\n len(list(self._stream_lvmap()))\n ))\n print('>' * len(''.join([str(i) for i in range(self.height)])), ' x [(y, length)]')\n for x in range(self.width)[::-1]:\n for y in range(self.height):\n space = ' ' if y < 10 else ' '\n char = space if self.pixels[y * self.width + x] else y\n print(char, end='')\n print(' ', '%2d' % x, self.lv_data[x])\n print()\n\n print('selecting {} mapping for {} char\\n'.format(\n 'lhmap horizontal' if self.is_char_lhmap() else 'lvmap vertical',\n self.char\n ))", "def do_draw_network(self, line):\n self.fibbing.root.lsdb.graph.draw(line)", "def _draw_line(self, event):\n if not self.obstacle_creation_mode:\n return\n\n if self.previous_coordinates is None:\n self.previous_coordinates = event.x, event.y\n self.new_obstacle.append([event.x, event.y])\n return\n\n x1, y1 = event.x, event.y\n\n if self._is_closing_shape(x1, y1, self.new_obstacle):\n x1, y1 = self.new_obstacle[0]\n else:\n self.new_obstacle.append([x1, y1])\n\n x0, y0 = self.previous_coordinates\n self.canvas.create_line(x0, y0, x1, y1, **self.LINE_OPTIONS)\n self.previous_coordinates = x1, y1", "def drawLines(self):\n\t\tintersections = [[], []]\n\t\tfor l in self.lines:\n\t\t\tif l.direction == 'v':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + int((self.width - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.width / 100) if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[0].append(position)\n\t\t\t\tfor yPos in range(1, self.height - 2):\n\t\t\t\t\tself.wts(yPos, position, '│', self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(0, position, '┬',self._borderColor)\n\t\t\t\tself.wts(self.height - 2, position, '┴', self._borderColor)\n\t\t\telif l.direction == 'h':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + ((self.height - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.height / 100) - 1 if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[1].append(position)\n\t\t\t\tself.wts(position, 1, '─' * (self.width - 2), self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(position, 0, '├', self._borderColor)\n\t\t\t\tself.wts(position, self.width - 1, '┤', self._borderColor)\n\t\t# draw intersections\n\t\tfor x in intersections[1]:\n\t\t\tfor y in intersections[0]:\n\t\t\t\tself.wts(x, y, '┼', self._borderColor)\n\t\tself.verticalBoundaries = intersections[0]\n\t\tif self.screenBorder:\n\t\t\tself.verticalBoundaries.append(self.width)", "def draw_line(color, start_pos, end_pos, width=1):\n pygame.draw.line(screen, color, start_pos, end_pos, width)", "def draw_lines(self):\n for x_cord in range(0, Dimension.SCREEN_WIDTH.value, Dimension.SQUARE_WIDTH.value):\n pg.draw.line(self.window, Colors.BLACK.value, (x_cord, 0), (x_cord, Dimension.SCREEN_HEIGHT.value))\n\n for y_cord in range(0, Dimension.SCREEN_HEIGHT.value, Dimension.SQUARE_HEIGHT.value):\n pg.draw.line(self.window, Colors.BLACK.value, (0, y_cord), (Dimension.SCREEN_WIDTH.value, y_cord))\n\n pg.display.update()", "def draw(self):\n glColor3f(1.0, 0.0, 0.0)\n glBegin(GL_LINES)\n for vertex in self.edges[0]:\n glVertex3fv(self.vertices[vertex])\n glColor3f(0.0, 1.0, 0.0)\n for vertex in self.edges[1]:\n glVertex3fv(self.vertices[vertex])\n glColor3f(0.0, 0.0, 1.0)\n for vertex in self.edges[2]:\n glVertex3fv(self.vertices[vertex])\n glEnd()", "def draw(self):\n # s1 = ShowPoint(self.cnv, self.p1.xpt, self.p1.ypt)\n # s2 = ShowPoint(self.cnv, self.p2.xpt, self.p2.ypt)\n # s1.draw()\n # # 
s2.draw()\n self.cnv.create_line(self.p1.xpt, self.p1.ypt, self.p2.xpt, self.p2.ypt)", "def update_line(self):\n self._draw_line_text()\n self._draw_status()\n self._line_listbox.set_focus(self.model.l_index)", "def _render_horizontal(self, gc, lx, ly, rx, ry, mx, my):\n\n with gc:\n gc.set_line_width(20)\n gc.set_stroke_color(self._get_border_color())\n tee_h(gc, lx, ly, mx, my, ry)\n\n gc.set_line_width(10)\n self.set_fill_color(gc)\n tee_h(gc, lx, ly, mx, my, ry)", "def _draw_line_text(self):\n self._line_text.set_text(self.model.get_current_line())", "def display(self, screen: pygame.Surface, line_thickness=3):\n\t\tfor p1, p2 in self.__calculate_points():\n\t\t\tpygame.draw.line(screen, Color(255).get(), p1.get_int(), p2.get_int(), line_thickness)", "def draw_lines(self):\n # draw x lines\n y = self.step_y\n while y <= self.height:\n x = 0\n while x <= self.width:\n self.canvas.create_line(x, y, x+3.5, y)\n self.canvas.update()\n x += 3.5\n y += self.step_y\n \n # draw y lines\n x = self.step_x\n while x <= self.width:\n y = 0\n while y <= self.height:\n self.canvas.create_line(x, y, x, y+3.5)\n self.canvas.update()\n y += 3.5\n x += self.step_x\n \n self.is_operating = False", "def draw(self, camera):\n for line in self._polyline.lines:\n camera.draw_line(line.begin, line.end, self.color, self.width)", "def hline(self, x, y, length, color):\n self.fill_rect(x, y, length, 1, color)", "def draw_bullet(self):\n pygame.draw.rect(self.__screen, self.__color, self.rect)", "def create_line(self):\n if self.hosts and self.line:\n self.msg(\"There is a line here already.\")\n self.display_line()\n return\n self.line = []\n other_hosts = [self.caller.search(arg) for arg in self.lhslist]\n other_hosts = [ob for ob in other_hosts if ob and ob.player]\n other_hosts.append(self.caller)\n self.hosts = other_hosts\n if \"loop\" in self.switches:\n self.toggle_loop()\n self.display_line()", "def _newLine(self, usePos=True):\n if len(self.currentLine) > 1:\n self.screen._drawline(self.currentLineItem, self.currentLine,\n self._pencolor, self._pensize)\n self.currentLineItem = self.screen._createline()\n self.items.append(self.currentLineItem)\n else:\n self.screen._drawline(self.currentLineItem, top=True)\n self.currentLine = []\n if usePos:\n self.currentLine = [self._position]", "def draw_line(self, x0, y0, x1, y1, color=Color['white']):\n pygame.draw.line(self.display, color, (x0, y0), (x1, y1))", "def draw_laser(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "def drawReference(x, y, z, l):\r\n\r\n glPushMatrix()\r\n\r\n glColor3f(1.0, 0.0, 0.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x + l, y, z)\r\n glEnd()\r\n\r\n glColor3f(0.0, 1.0, 0.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x, y + l, z)\r\n glEnd()\r\n\r\n glColor3f(0.0, 0.0, 1.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x, y, z + l)\r\n glEnd()\r\n\r\n glPopMatrix()", "def draw_bullet(self):\r\n pygame.draw.rect(self.screen, self.color, self.rect)", "def draw_line(self, DISP, side:str, indizes:tuple, pink = False):\r\n offset = 1 #< Just to draw the line nicely\r\n pos = (indizes[0] - 1) * self.grid_size, indizes[1] * self.grid_size\r\n # Check if it's a pink line\r\n if pink:\r\n start_pos = pos[0], pos[1] + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size, pos[1] + self.grid_size // 2\r\n # Check if the line should be vertically. 
u for up\r\n elif side == 'u':\r\n start_pos = pos[0] + self.width - offset + self.grid_size // 2, pos[1] + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size + offset + self.grid_size // 2 - self.width, pos[1] + self.grid_size // 2\r\n # Check if the line should be horizontally. l for left\r\n elif side == 'l':\r\n start_pos = pos[0] + self.grid_size // 2, pos[1] + self.width - offset + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size // 2, pos[1] - self.width + self.grid_size + offset + self.grid_size // 2\r\n if not pink:\r\n pg.draw.line(DISP, Colors.colors['BLACK'], start_pos,end_pos, self.width + 2 * offset) \r\n else:\r\n pg.draw.line(DISP, Colors.colors['PINK'], start_pos,end_pos, self.width + 2 * offset)", "def add_line(self, text):\n\t\twidth, height = self.font.size(text)\n\t\tpos = (self.rect.left + 10, self.rect.bottom - height- 5)\n\t\trend = self.font.render(text, True, BLACK)\n\t\t# Move all already existing lines up\n\t\tfor i in range(len(self.all_lines)):\n\t\t\toldsurf, oldpos = self.all_lines[i]\n\t\t\tself.all_lines[i] = self.lift_line(oldsurf, height, oldpos)\n\t\t\tcopy = oldsurf.copy()\n\t\t\tcopy.fill(BG_COLOR)\n\t\t\tself.image.blit(copy, oldpos)\n\t\tself.all_lines.append([rend, pos])\n\t\tself.image.blit(rend, pos)", "def draw(self):\r\n pygame.draw.rect(self.screen, self.background_color, self.bounds)\r\n line_window = self.lines[self.scroll_window_top:self.scroll_window_bottom]\r\n for idx,line in enumerate(line_window):\r\n text = self.font.render(line, True, self.foreground_color)\r\n x,y = self._get_x_y_from_pos(self.position[0], self.position[1]+idx)\r\n self.screen.blit(text,(x,y))\r\n \r\n if self.cursor_visible and self.scroll_window_bottom == len(self.lines):\r\n x,y = self._get_x_y_from_pos(len(line_window[-1]), len(line_window))\r\n cursor_rect = pygame.Rect(x,y,\r\n self.text_width,self.text_height)\r\n pygame.draw.rect(self.screen, self.foreground_color, cursor_rect)", "def draw_line(tick_length, tick_label=''):\n line = \"_\" * tick_length\n if tick_label:\n line += ' ' + tick_label\n print(line)", "def draw(self):\n if len(self.__points) >= 2:\n self._total_length = 0\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n coords = self.__line_segment(p1, p2)\n if not coords is None:\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 1, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )\n coords = self.__line_cap(p2)\n pyglet.graphics.draw_indexed(4, pyglet.gl.GL_TRIANGLES,\n [0, 1, 2, 0, 2, 3],\n ('v2i', coords),\n ('c4b', self.color * 4)\n )", "def draw_bullet(self):\n\t\tpygame.draw.rect(self.screen, self.colour, self.rect)", "def hline(self, x, y, width, color):\n self.rect(x, y, width, 1, color, fill=True)", "def _defLine(self):\n self._dline=GPath(points = [0,100,GAME_WIDTH,100], linewidth = 1.5,\n linecolor = 'cyan')", "def _DrawLineList(*args, **kwargs):\n return _gdi_.DC__DrawLineList(*args, **kwargs)", "def draw_line(self, x1, y1, x2, y2, color):\n painter = QPainter()\n painter.begin(self.lbFFmpeg.pixmap())\n painter.setPen(QColor(color))\n painter.drawLine(x1, y1, x2, y2)\n painter.end()\n self.lbFFmpeg.update()", "def startLineDrawing(self, startPos):\n self.line = LineNodePath(render2d, thickness=2, colorVec=(0.8,0.8,0.8,1))\n self.line.moveTo(startPos)\n t = taskMgr.add(self.drawLineTask, \"drawLineTask\")\n t.startPos = startPos", "def _draw_line(plot, hori, vert, color, text):\n plot.plot(hori, vert, '-o'+color)\n plot.text(hori[-1]-3, vert[-1]+2, 
text, color=color)", "def DrawLine(*args, **kwargs):\n return _gdi_.PseudoDC_DrawLine(*args, **kwargs)", "def line_layer(self):\n screen_origin = self.ids.mapview.get_window_xy_from(lat1, lon1, self.ids.mapview.zoom)\n screen_destination = self.ids.mapview.get_window_xy_from(lat2, lon2, self.ids.mapview.zoom)\n point_list = [screen_origin[0], screen_origin[1], screen_destination[0], screen_destination[1]]\n\n with self.ids.line.canvas:\n self.ids.line.canvas.clear()\n\n Color(0, 0, 0, .6)\n Line(points=point_list, width=3, joint=\"bevel\")", "def draw_lines(display, coord, box_size, color, bg_color):\n left, top = coord\n stroke = 6\n half_stroke = int(stroke / 2)\n left = left + half_stroke\n top = top + half_stroke\n box_size = box_size - stroke\n for i in range(0, box_size, int(stroke + 2)):\n pygame.draw.line(\n display, color,\n (left, top + i),\n (left + i, top),\n stroke,\n )\n pygame.draw.line(\n display, color,\n (left + i, top + box_size - 1),\n (left + box_size - 1, top + i),\n stroke,\n )\n return", "def draw(self, img):\n self._erase_last_line(self.img)\n\n idxs = np.argwhere(img[:, self._pos] == 0)\n self.prev_y = (idxs.min(), idxs.max())\n\n cv.line(img, (self._pos, 0), (self._pos, self.h), (0, 0, 0), 1)", "def draw_asc_diagonal(self, player):\n if player == 1:\n color = GameData.circle_color\n elif player == 2:\n color = GameData.cross_color\n\n pygame.draw.line(\n self.game_screen,\n color,\n (15, GameData.screen_dim - 15),\n (GameData.screen_dim - 15, 15), GameData.win_line_width)", "def line(self, start, end, color=(255, 255, 255), width=1):\n start = self._transform(start)\n end = self._transform(end)\n\n pygame.draw.line(self.screen, color, start, end, width)", "def draw_line(self, coords, smooth=False, **options):\n # NOTE: Outline does not work because uses paths instead of normal line method.\n # TODO: Add volume param, containing a list of linewidths same length as line\n # or as a function that calculates the width at each node\n # Result is a flow line with varying thickness at each node\n # Have to calculate left/right xy at each node, and use symbol curveto()\n # Easy and really cool...DO IT!\n options = self._check_options(options)\n \n if not hasattr(coords[0], \"__iter__\"):\n coords = _grouper(coords, 2)\n else: coords = (point for point in coords)\n \n # get drawing tools from options\n args = []\n if options[\"fillcolor\"]:\n pen = aggdraw.Pen(options[\"fillcolor\"], options[\"fillsize\"])\n args.append(pen)\n\n if smooth:\n\n # Note: Creation of the aggdraw.Symbol object here can be\n # very slow for long lines; Path is much faster but due\n # to a bug it does not correctly render curves, hence the use\n # of Symbol\n \n pathstring = \"\"\n \n # begin\n coords = _pairwise(coords)\n (startx,starty),(endx,endy) = next(coords)\n pathstring += \" M%s,%s\" %(startx, starty)\n \n # draw straight line to first line midpoint\n midx,midy = (endx + startx) / 2.0, (endy + starty) / 2.0\n pathstring += \" L%s,%s\" %(midx, midy)\n oldmidx,oldmidy = midx,midy\n \n # for each line\n for line in coords:\n # curve from midpoint of first to midpoint of second\n (startx,starty),(endx,endy) = line\n midx,midy = (endx + startx) / 2.0, (endy + starty) / 2.0\n pathstring += \" Q%s,%s,%s,%s\" %(startx, starty, midx, midy)\n oldmidx,oldmidy = midx,midy\n \n # draw straight line to endpoint of last line\n pathstring += \" L%s,%s\" %(endx, endy)\n\n # make into symbol object\n symbol = aggdraw.Symbol(pathstring)\n\n # draw the constructed symbol\n self.drawer.symbol((0,0), symbol, 
*args)\n\n else:\n\n path = aggdraw.Path()\n \n # begin\n startx,starty = next(coords)\n path.moveto(startx, starty)\n \n # connect to each successive point\n for nextx,nexty in coords:\n path.lineto(nextx, nexty)\n\n # draw the constructed path\n self.drawer.path((0,0), path, *args)", "def draw_line(self, frame, rect):\n print(\"x0, y0, x1, y1\", self.x0, self.y0, self.x1, self.y1)\n print(\"cross_x, cross_y\", self.cross_x, self.cross_y)\n left, top, right, bottom = rect\n # 枠内では線を表示しないようにしてやる\n if top<self.y1<bottom and left<self.x1<right:\n return\n # フレームと線の交点\n if (self.x1 >= right or self.x1 <= left or self.y1 <= top or self.y1 >= bottom) and self.cross_x == 0:\n self.cross_x = self.x1\n self.cross_y = self.y1\n return\n draw = ImageDraw.Draw(frame)\n draw.line((self.cross_x, self.cross_y, self.x1, self.y1), fill=(255, 255, 255), width=3)", "def draw_lines(self, color, points, width = 1, closed = False):\n color = spyral.color._determine(color)\n pygame.draw.aalines(self._surf, color, closed, points)", "def draw_line(self, color, p1: Point, p2: Point, width):\n _p1 = self.T.itrans(p1)\n _p2 = self.T.itrans(p2)\n pg.draw.line(self.screen, color, _p1(), _p2(), 2)", "def paint_axes(self, l=1):\n GL.glBegin(GL.GL_LINES)\n GL.glColor3f(1, 0, 0) # red x axis\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(l, 0, 0)\n GL.glColor3f(0, 1, 0) # green y axis\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(0, l, 0)\n GL.glColor3f(0, 0, 1) # blue z axis\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(0, 0, l)\n GL.glEnd()", "def paint_axes(self, l=1):\n GL.glBegin(GL.GL_LINES)\n GL.glColor3f(1, 0, 0) # red x axis\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(l, 0, 0)\n GL.glColor3f(0, 1, 0) # green y axis\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(0, l, 0)\n GL.glColor3f(0, 0, 1) # blue z axis\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(0, 0, l)\n GL.glEnd()", "def on_draw(event):\n # First, we clear the window in white\n # (it is necessary to do that at every frame)\n gloo.set_clear_color((1.0, 1.0, 1.0, 1.0))\n gloo.clear()\n program.draw(\"line_strip\")", "def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)", "def OnDraw(self):\r\n self.SetCurrent()\r\n\r\n glClear(GL_COLOR_BUFFER_BIT)\r\n\r\n glBegin(GL_LINES)\r\n\r\n glColor3f(1.0, 1.0, 1.0)\r\n \r\n \r\n #Just in case these aren't set back to their starting place yet...\r\n self.currentpoint = self.startingpoint\r\n self.currentheading = 0 \r\n \r\n for element in self.finalstring:\r\n if element == '+':\r\n self.currentheading += self.angle\r\n elif element == '-':\r\n self.currentheading -= self.angle\r\n elif element == 'F':\r\n glVertex2i(self.currentpoint[0], self.currentpoint[1])\r\n self.currentpoint = self.NextPoint(self.currentpoint, self.length, self.currentheading)\r\n glVertex2i(self.currentpoint[0], self.currentpoint[1])\r\n elif element == '[':\r\n self.stack.append([self.currentpoint[0], self.currentpoint[1], self.currentheading])\r\n elif element == ']':\r\n popped = self.stack.pop()\r\n self.currentheading = popped.pop()\r\n self.currentpoint = popped\r\n \r\n \r\n glEnd()\r\n self.currentpoint = self.startingpoint\r\n self.currentheading = 0\r\n \r\n \r\n self.SwapBuffers() \r\n\r\n return", "def dline(x, y):\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(0.0, 0.0, 1.0)\n glPointSize(10.0)\n glBegin(GL_POINTS)\n while (x <= y):\n glVertex2f(x, x)\n x += 0.05\n glEnd()\n glFlush()", "def line(self, clear_screen=True, x1=10, y1=10, x2=50, y2=50, line_color='black', width=1):\n\n if clear_screen:\n 
self.clear()\n\n return self.draw.line((x1, y1, x2, y2), fill=line_color, width=width)", "def redraw(self): \r\n\r\n self.arrow.undraw() \r\n pt2 = Point(self.vel*cos(self.angle), self.vel*sin(self.angle))\r\n self.arrow = Line(Point(0,0), pt2).draw(self.win) \r\n self.arrow.setArrow('last')\r\n self.arrow.setWidth(3)", "def redraw(self):\n \n self.arrow.undraw()\n pt2 = Point(self.vel*cos(self.angle), self.vel*sin(self.angle))\n self.arrow = Line(Point(0,0), pt2).draw(self.win)\n self.arrow.setArrow(\"last\")\n self.arrow.setWidth(3)", "def draw_lh_lines(data):\n #hnd = extract_left_hand(data);\n hnd = np.array(data['crop']);\n hand.draw_hand_lines(hnd,data['lhkpss'][data['i']]);\n return hnd;", "def drawPath(self):\r\n bgl.glColor4f(0.8,0.8,0.9,0.01)\r\n bgl.glLineWidth(0.01)\r\n\r\n bgl.glBegin(bgl.GL_LINES)\r\n bgl.glVertex3f(self.p1[0],self.p1[1],self.p1[2])\r\n bgl.glVertex3f(self.p2[0],self.p2[1],self.p2[2])\r\n bgl.glEnd()\r\n\r\n bgl.glNormal3f(0.0,0.0,1.0)\r\n bgl.glShadeModel(bgl.GL_SMOOTH);", "def draw(self, base, level):\n\n a = base.a\n b = base.b\n\n if level > 0:\n delta = base.b - base.a\n px = a.x + delta.x / 3\n py = a.y + delta.y / 3\n rx = a.x + 2 * delta.x / 3\n ry = a.y + 2 * delta.y / 3\n p = Point(px, py)\n r = Point(rx, ry)\n q = Point(rx, ry)\n q.rotate_deg(60, p)\n self.draw(Line(a,p), level-1)\n self.draw(Line(p,q), level-1)\n self.draw(Line(q,r), level-1)\n self.draw(Line(r,b), level-1)\n else:\n self.container.window.create_line(a.x, a.y, b.x, b.y)", "def draw(cls, lcfg, parent):\n axes = parent.getAxes()\n props = {}\n line = None\n label = None\n idx = int(lcfg.key)\n for child in lcfg.children:\n key = child.key\n val = child.val\n if key == cls.XY:\n xvals = []\n yvals = []\n for xy in val:\n x, y = xy\n xvals.append(x)\n yvals.append(y)\n line = axes.plot(xvals, yvals)\n elif key in cls.KEYS:\n props[key] = val\n if line == None:\n raise ValueError(\"No xy data found in %s.%s\" %(parent, idx))\n for key, val in props.iteritems():\n if key == LABEL:\n label = val\n if key == MARKER:\n plt.setp(line, marker=val)\n elif key == MARKERSIZE:\n plt.setp(line, markersize=val)\n elif key == CONN:\n plt.setp(line, linestyle=val)\n elif key == WIDTH:\n plt.setp(line, linewidth=val)\n elif key == COLOR:\n plt.setp(line, color=val)\n newline = Line2D(line, parent, label)\n parent.addLine(newline)\n return newline", "def draw():", "def draw_line(self, gray=0, nextline=0):\n\n self.fontsize = 4\n if nextline:\n self.nextline()\n else:\n self.linespace(8)\n self.resetx()\n c = self.canvas\n c.setStrokeGray(gray)\n c.setLineWidth(1)\n #self.y = self.y + self.linespacing + (self.fontsize/2)\n c.line(self.x, self.y, self.width - self.x, self.y)\n self.y = self.y + (self.linespacing)", "def draw():\n screen.fill((0, 0, 0))\n alien.draw()", "def wdraw_line(self, wx0, wy0, wx1, wy1, color, arrow):\r\n dx0, dy0 = self.w_to_d(wx0, wy0)\r\n dx1, dy1 = self.w_to_d(wx1, wy1)\r\n self.canvas.create_line(dx0, dy0, dx1, dy1, fill=color, arrow=arrow)", "def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )", "def draw_horizontal_winning_line(self, row, player):\n posY = row * GameData.square_size + GameData.square_size // 2\n\n if player == 1:\n color = GameData.circle_color\n elif player == 2:\n color = GameData.cross_color\n\n pygame.draw.line(\n self.game_screen,\n color, (15, posY),\n (GameData.screen_dim - 15, 
posY),\n GameData.win_line_width\n )", "def my_simple_line(master, name, r, c, rsp, csp, px, py) -> object:\n line = tk.Label(master=master, text=name, anchor='w')\n line.grid(row=r, column=c, rowspan=rsp, columnspan=csp, padx=px, pady=py)\n return line", "def DrawLine(*args, **kwargs):\n return _gdi_.DC_DrawLine(*args, **kwargs)", "def draw(self):\n self.drawLine()\n\n for l in range(0, self.height):\n print(\"|\", end='', flush=True)\n for c in range(0, self.width):\n print(\" \" + str(self.grid[l][c]) + \" |\", end='', flush=True)\n print(\"\\n\", end='', flush=True)\n\n self.drawLine()", "def draw(x,y,x1,y1,d,color=1):\n d.add(dxf.line((x,y),(x1,y1),color=color, layer='LINES',thickness=0.01))", "def draw(self, screen):\n lines = self.text.strip().split('\\n')\n y = self.y\n for line in lines:\n self.ui.show_text(line, (self.x, y), 30)\n y += 32", "def draw_bullet(self):\n self.screen.blit(self.image, self.rect)", "def draw_bullet(self):\n self.screen.blit(self.image, self.rect)", "def _plot_line(self, image, x1, y1, x2, y2, width, color):\n\n draw = ImageDraw.Draw(image, \"RGBA\")\n draw.line([x1, y1, x2, y2], fill=color, width=width)\n del draw\n\n return image", "def draw_lines(self, color, points, width=1, closed=False):\n if width == 1:\n pygame.draw.aalines(self._surf, color, closed, points)\n else:\n pygame.draw.lines(self._surf, color, closed, points, width)\n self._version += 1\n spyral.util.scale_surface.clear(self._surf)\n return self", "def _render_vertical(self, gc, lx, ly, rx, ry, mx, my):\n mx = lx + (rx - lx) / 2.\n with gc:\n gc.set_line_width(20)\n gc.set_stroke_color(self._get_border_color())\n tee_v(gc, lx, ly, rx, mx, my)\n\n gc.set_line_width(10)\n self.set_fill_color(gc)\n tee_v(gc, lx, ly, rx, mx, my)", "def line(self, drawer, canvas):\n start_width = random.randint(\n self._width / 8, self._width / 4)\n start_height = random.randint(\n self._height / 4, self._height * 3 / 4)\n stop_width = random.randint(\n self._width * 3 / 4, self._width * 7 / 8)\n stop_height = random.randint(\n self._height / 4, self._height * 3 / 4)\n drawer.line(\n (start_width,\n start_height,\n stop_width,\n stop_height),\n fill=random.randint(128, 155),\n width=3\n )", "def draw_lines_slow(orbit_pos, factor):", "def draw_desc_diagonal(self, player):\n if player == 1:\n color = GameData.circle_color\n elif player == 2:\n color = GameData.cross_color\n\n pygame.draw.line(self.game_screen, color, (15, 15), (GameData.screen_dim - 15, GameData.screen_dim - 15),\n GameData.win_line_width)", "def line(l, color='k', **kwargs):\n ax.plot(wfl(nth(l, 0)), hfl(nth(l, 1)), color=color, **kwargs)", "def plot(self, **kwargs):\n base.plot_homline(self.line, **kwargs)", "def _draw_line(event, x, y, flags, params):\n global img, source_img\n global p1, p2\n if event == cv2.EVENT_LBUTTONDOWN:\n img = source_img.copy()\n p1 = (x, y)\n elif event == cv2.EVENT_LBUTTONUP:\n p2 = (x, y)\n img = source_img.copy()\n text = 'position: %d' % p2[0]\n cv2.putText(img, text, (100, 100),\n cv2.FONT_HERSHEY_SIMPLEX, 3, DrawingShapeUtils.COLOR, \n DrawingShapeUtils.LINE_THICKNESS)\n cv2.line(img, (x, y+100), (x, y-100), DrawingShapeUtils.COLOR,\n DrawingShapeUtils.LINE_THICKNESS)", "def line(self, points, ls=\"--\", draw=\"black\", lw=None, options=None, kwoptions=None):\n\n draw = norm_colour(draw)\n self.use_colour(draw)\n\n if kwoptions is None:\n kwoptions = {}\n kwopts = {'draw': draw, **kwoptions}\n if lw:\n kwopts['line width'] = lw\n\n self._commands.append(rf\"\\draw{wrap(fmt_options(options,kwopts))} \" +\n 
f\" {ls} \".join(map(fmt_point, points))+\";\")", "def hline(self, xi: int, yi: int, length: int, color: int):\n for x in range(length):\n self.pixel(xi + x, yi, color)", "def draw_log(self):\n if self.logwin == None:\n return\n self.logwin.clear()\n max_y, max_x = self.logwin.getmaxyx()\n lines = self.list_prev_lines(max_y)\n k = 0\n n = max_y - 1\n while n >= 0:\n if k < len(lines):\n line = lines[k]\n k += 1\n else:\n break\n if len(line) == 0:\n line = '~'\n line = line.rstrip('\\n\\r').expandtabs(4)\n blocks = self.split_string(line, max_x-1)\n blocks.reverse()\n for block in blocks:\n try:\n self.logwin.addstr(n, 0, block)\n except Exception as e:\n logging.warning('An error occured in the curses library.')\n logging.debug(e, exc_info=1)\n n -= 1\n if n < 0:\n break\n self.logwin.noutrefresh()\n return", "def plotLines( self ):\n \n ## plot tree in dfs manner\n def plotLines( node_id ):\n\n node = self.mTree.node( node_id )\n\n left = self.mNodeWidthsStart[node_id]\n right = self.mNodeWidthsEnd[node_id]\n height = self.mNodeHeights[node_id] \n\n if right != left and node_id != self.mTree.root:\n self.addElements( self.mDecoratorHorizontalBranches.getElements(\n node_id,\n self.getHeaderWidth() + left,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height ))\n \n\n for s in node.succ:\n\n new_height = self.mNodeHeights[s]\n self.addElements( self.mDecoratorVerticalBranches.getElements(\n node_id,\n self.getHeaderWidth() + right,\n self.getHeaderHeight() + height,\n self.getHeaderHeight() + new_height ))\n \n TreeTools.TreeDFS( self.mTree, self.mTree.root,\n pre_function = plotLines )", "def Draw_Node( self, node, xstart, ystart):\r\n xdist = node.data.length * cb.xtick\r\n handle = self.canvas_one.create_line( xstart, ystart, xstart+xdist, ystart,width = 3, fill=self.branch_color )\r\n #Attach a handle to a node and place in the handle_list of all LineSegments\r\n ls = LineSegment( handle, node )\r\n self.handle_list.append(ls)\r\n return ystart", "def HorizLine(self, parent, depth=3):\n line = sppasStaticLine(parent, orient=wx.LI_HORIZONTAL)\n line.SetMinSize(wx.Size(-1, depth))\n line.SetSize(wx.Size(-1, depth))\n line.SetPenStyle(wx.PENSTYLE_SOLID)\n line.SetDepth(depth)\n line.SetForegroundColour(self.GetForegroundColour())\n return line", "def on_draw(self):\n # Clearing the buffers\n self.clear()\n self.set3d()\n # Makes it so color can be added\n glColor3d(1, 1, 1)\n\n self.push(self.player.pos, self.player.rot)\n self.model.draw()\n glPopMatrix()\n self.model.process_queue_slowly()\n\n # Draws the crosshairs on the screen\n self.set2d()\n self.draw_position_label()\n self.draw_reticle()", "def draw_line():\n global y1, y2\n canvas.create_line(x1, y1, x2, y2, width=2, fill=color)\n y1 -= 10\n y2 += 10", "def __draw(self, display, color, size):\n\t\tif self.walls[0]: # up\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size) , (self.col * size + size, self.row * size))\n\t\tif self.walls[3]: # down\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size + size), (self.col * size , self.row * size + size))\n\t\tif self.walls[1]: #left\n\t\t\tpygame.draw.line(display, color, (self.col * size + size, self.row * size) , (self.col * size + size, self.row * size + size))\n\t\tif self.walls[2]: #right\n\t\t\tpygame.draw.line(display, color, (self.col * size , self.row * size + size), (self.col * size , self.row * size))\n\n\t\tif self.current:\n\t\t\tdraw_rect_with_alpha(display, self.CURRENT_COLOR, Vector((self.col, 
self.row)) * size, (size, size))\n\n\t\telif self.backtracked and self.SHOW_BACKTRACK:\n\t\t\tdraw_rect_with_alpha(display, self.BACKTRACKED_COLOR, Vector((self.col, self.row)) * size, (size, size))\n\n\t\telif self.visited:\n\t\t\tdraw_rect_with_alpha(display, self.VISITED_COLOR, Vector((self.col, self.row)) * size, (size, size))", "def drawSlope(self):\n length = sqrt(1 + self.slope**2) # Length of the line segment over 1 x-unit\n xOffset = (segmentLength / length) / 2 # Figures out how many times the length of the 1 unit length fits into the desired length\n # then divides by 2 becuase half is on the left and half on the right of the center\n\n\n # Left end point\n xLeft = self.x - xOffset\n yLeft = (self.slope * (xLeft - self.x)) + self.y\n\n # Right end point\n xRight = self.x + xOffset\n yRight = (self.slope * (xRight - self.x)) + self.y\n\n\n # Converts the left and right end points from cartesian coordinates to screen coordinates\n left = cartesianToScreen(xLeft , yLeft)\n right = cartesianToScreen(xRight, yRight)\n\n\n pygame.draw.aaline(display, self.color, left, right, 1) # DRAWS THE LINE AHHHHHHHHHHHHHHHHHH :P", "def display_hline():\n for i in range(12):\n print(\"-\", end=\"\")\n print()", "def drawLine(x0,y0,x1,y1,ucoords=1):\n if ucoords:\n dislin.rline(x0,y0,x1,y1)\n else:\n dislin.line(x0,y0,x1,y1)", "def draw_line(self, x):\n self.PDF.setStrokeColor(black01)\n self.PDF.setLineWidth(1)\n self.PDF.line(75, x, 550, x)\n self.PDF.setStrokeColor(\"black\")", "def draw_line(self, x0, y0, x1, y1, color=None, colorFunc=None, aa=False):\n if aa:\n self._draw_wu_line(x0, y0, x1, y1, color, colorFunc)\n else:\n self._draw_bresenham_line(x0, y0, x1, y1, color, colorFunc)", "def draw_lines(point_list, color, border_width=1):\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n\n # Set line width\n GL.glLineWidth(border_width)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_LINES)\n for point in point_list:\n GL.glVertex3f(point[0], point[1], 0.5)\n GL.glEnd()", "def drawLine(self,start,stop):\n startX = int(self.vert[start][0]*self.scale + self.size/2)\n startY = int(self.vert[start][1]*self.scale + self.size/2)\n endX = int(self.vert[stop][0]*self.scale + self.size/2)\n endY = int(self.vert[stop][1]*self.scale + self.size/2)\n \n self.canvas.create_line(startX,startY,endX,endY,fill='white')", "def vline(self, x, y, height, color):\n self.rect(x, y, 1, height, color, fill=True)", "def draw(self):\n if self.state == 'alive':\n for i in range(len(self.tail)):\n pygame.draw.rect(display, black, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, black, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))\n\n else:\n for i in range(len(self.tail)):\n pygame.draw.rect(display, red, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, red, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))" ]
[ "0.6687818", "0.6577214", "0.65438086", "0.64897895", "0.63934726", "0.63052434", "0.6266094", "0.6217813", "0.61868715", "0.61665386", "0.6159552", "0.6142421", "0.6132305", "0.60893184", "0.60786086", "0.60565233", "0.60377926", "0.60295296", "0.6012153", "0.59879833", "0.59606403", "0.595874", "0.59582007", "0.59281206", "0.5923167", "0.59134054", "0.5903696", "0.5900028", "0.5886914", "0.5865786", "0.5841419", "0.5841355", "0.58362246", "0.5829959", "0.582558", "0.581513", "0.5806804", "0.58012825", "0.57934916", "0.5790543", "0.57851416", "0.5783564", "0.5770053", "0.57697934", "0.5767894", "0.576559", "0.5764961", "0.57517326", "0.57517326", "0.574389", "0.5731478", "0.5723073", "0.5717405", "0.57008255", "0.56977636", "0.569275", "0.56790674", "0.5670691", "0.5657995", "0.5657743", "0.5647384", "0.56401104", "0.56198317", "0.5616447", "0.5606434", "0.56054157", "0.5601878", "0.5597472", "0.5595401", "0.5595292", "0.5593592", "0.55933535", "0.55933535", "0.5585213", "0.5583836", "0.55806226", "0.5575466", "0.55726594", "0.55712384", "0.55709654", "0.55666095", "0.55665445", "0.5565574", "0.5559607", "0.5556027", "0.555597", "0.55487484", "0.5542114", "0.55406755", "0.5534779", "0.5534259", "0.55331475", "0.5522396", "0.5522202", "0.55215156", "0.5515157", "0.55084217", "0.55066544", "0.54928094", "0.5489795" ]
0.6821605
0
Find distance to lifeline item. We calculate the distance to the lifeline's head, and then we calculate the lifetime. We return the minimum.
def point(self, x, y):
    d1 = super().point(x, y)
    top = self._lifetime.top
    bottom = self._lifetime.bottom
    d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]
    return min(d1, d2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calc_min_distance(self, walker):\n\n cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n\n t2 = time.time()\n # make a traj out of it so we can calculate distances through\n # the periodic boundary conditions\n walker_traj = mdj.Trajectory(walker.state['positions'],\n topology=self._mdj_top,\n unitcell_lengths=cell_lengths,\n unitcell_angles=cell_angles)\n\n t3 = time.time()\n # calculate the distances through periodic boundary conditions\n # and get hte minimum distance\n min_distance = np.min(mdj.compute_distances(walker_traj,\n it.product(self.ligand_idxs,\n self.receptor_idxs),\n periodic=self._periodic)\n )\n t4 = time.time()\n logging.info(\"Make a traj: {0}; Calc dists: {1}\".format(t3-t2,t4-t3))\n\n return min_distance", "def get_min_distance(self, node):\r\n if self.have_min_distance(node):\r\n return self.table[node][\"dist\"]\r\n return None", "def _minimum_distance(self,arg):\n return min([abs(arg-e) for e in self if not e is arg])", "def lidar_relative(self):\n return self.distance", "def min_distance(distance, spt_set, self_nodes):\n minimum = sys.maxsize\n minimum_node = None\n for curr_node in self_nodes.values():\n if distance[curr_node.id] < minimum and not spt_set[curr_node.id]:\n minimum = distance[curr_node.id]\n minimum_node = curr_node\n return minimum_node", "def linelength(l):\n return dist(l[0],l[1])", "def get_min_distance(self):\n return round(min(self.combined_euclidian_distance))", "def getMinNode(self):\n currentNode = self.openList[0]\n for node in self.openList:\n if node.g + node.h < currentNode.g + currentNode.h:\n currentNode = node\n return currentNode", "def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance", "def minDist(l, a, b):\n pre = 0\n rt = float('INF')\n for i in range(len(l)):\n if l[i] == a or l[i] == b:\n pre = i\n break\n\n for i in range(pre+1, len(l)):\n if l[i] == a or l[i] == b:\n if l[i] != l[pre] and i - pre < rt:\n rt = i - pre\n pre = i\n return rt", "def top_of_climb_distance(self):\n return self.distances[self.top_of_climb_index()]", "def top_of_descent_distance(self):\n return self.distances[self.top_of_descent_index()]", "def previous_min(L):\n\n return itertoolsextra.max_diff(L)", "def minimal_distance(me):\n smallest_d = 101 # given length of edge <= 100\n ismallest = -1 # index of the edge in the list, me\n for i, e in enumerate(me):\n if e[0] < smallest_d:\n smallest_d = e[0]\n ismallest = i\n\n d = me[ismallest][0]\n v1 = me[ismallest][1]\n v2 = me[ismallest][2]\n me.pop(ismallest)\n\n smallest_d = 101\n for i, e in enumerate(me):\n if (e[1] == v1 or e[2] == v1 or e[1] == v2 or e[2] == v2) and e[0] < smallest_d:\n smallest_d = e[0]\n\n d += smallest_d\n return d", "def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)", "def 
find_min_distance():\n return np.argmin(d)", "def min_distance(self, target):\n difference = self.pivot - target\n return max(math.sqrt(np.dot(difference, difference)) - self.radius, 0)", "def longest_flight(self):\r\n distance = 0\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n if edge.distance > distance:\r\n distance = edge.distance\r\n start = edge.start\r\n destination = edge.destination\r\n return start, destination, distance", "def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode", "def find_min(ls):\n\n if len(ls) == 1:\n return ls[0]\n elif len(ls) == 2:\n return ls[0] if ls[0] < ls[1] else ls[1]\n else:\n mid = len(ls) // 2\n m1 = find_min(ls[0:mid])\n m2 = find_min(ls[mid:])\n return m1 if m1 < m2 else m2", "def shortest_flight(self):\r\n distance = sys.maxsize\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n if edge.distance < distance:\r\n distance = edge.distance\r\n start = edge.start\r\n destination = edge.destination\r\n return start, destination, distance", "def get_closest_distance_to_path(self, path):\n min_distance_to_line = float(\"inf\")\n for p in path:\n game_path = p[:]\n\n game_path.sort(key = lambda coord: calculate_distance(self, coord))\n point_A = game_path[0] # Closest point out of all the points on the path to to the tower\n\n try:\n point_after_A = p[p.index(point_A) + 1]\n point_before_A = p[p.index(point_A) - 1]\n closest_to_A = min(point_after_A, point_before_A, key = lambda point: calculate_distance(point_A, point))\n except:\n if p.index(point_A) == 0:\n closest_to_A = p[p.index(point_A) + 1]\n \n elif p.index(point_A) == len(p) - 1:\n closest_to_A = p[p.index(point_A) - 1]\n finally:\n if closest_to_A[0] != point_A[0]:\n m = (closest_to_A[1] - point_A[1]) / (closest_to_A[0] - point_A[0])\n else:\n m = 2\n\n b = point_A[1] - m * point_A[0]\n\n closest_distance = abs(-m * self.x + self.y - b) / math.sqrt((-m) ** 2 + 1)\n min_distance_to_line = min(closest_distance, min_distance_to_line)\n \n return min_distance_to_line", "def closest(self, x):\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the negative\n\n x = arg.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam) # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)", "def _find_min(self):\n if self.is_empty(): # is_empty inherited from base class\n raise Empty('Priority queue is empty')\n small = self._data.first()\n walk = self._data.after(small)\n while walk is not None:\n if walk.element() < small.element():\n small = walk\n walk = self._data.after(walk)\n return small", "def distance_to_current_waypoint(self):\n next_waypoint = self.vehicle.commands.next\n if next_waypoint == 1:\n return None\n mission_item = self.vehicle.commands[next_waypoint]\n lat = mission_item.x\n lon = mission_item.y\n alt = mission_item.z\n waypoint_location = Location(lat, lon, alt, is_relative=True)\n distance = get_distance_meters(self.vehicle.location, waypoint_location)\n return distance", "def h(self,node):\n \"*** YOUR CODE HERE ***\"\n dist_arr = [] #Initialize Array\n for goal in self.goals: # Iterate through Goals\n dist_arr.append(manhattan_distance_with_heading(node.state, goal)) # Add distance between node and goal\n return min(dist_arr) # Return minimum", "def 
_get_distance(reindeer, race_time):\n interval = reindeer.flight_time + reindeer.rest_time\n cycles = race_time // interval\n flight_time = min(reindeer.flight_time, race_time - interval * cycles)\n total_flying_time = reindeer.flight_time * cycles + flight_time\n return total_flying_time * reindeer.flight_speed", "def closest_distance(node_a, node_b):\n min_distance = 999999\n for loc_a in node_a.locations:\n for loc_b in node_b.locations:\n distance = abs(loc_a - loc_b)\n if distance < min_distance:\n min_distance = distance\n return min_distance", "def nn(x, S, dist):\n\n # note that there might be more than on minimal item. min will return the\n # first one ecountered\n return min(S, key=lambda y: dist(x, y[:-1]))", "def get_distance(self):\n values = self.speakers.values()\n values.sort(reverse=True)\n try:\n return abs(values[1]) - abs(values[0])\n except (IndexError, ValueError):\n return -1", "def get_closest_lane(self, car: Car) -> AbstractLane:\n pos = car.position\n lanes = self.upstream_lane_list if car.lane.upstream else self.downstream_lane_list\n return min(lanes, key=lambda l: l.distance(pos))", "def calculate_distance_edge(self):\n mu_star = -np.sqrt(1. - (self.cell_xl / self.x)**2)\n\n if self.mu <= mu_star:\n\n l_edge = (-self.mu * self.x -\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xl**2))\n self.next_cell_index = self.cell_index - 1\n\n else:\n\n l_edge = (-self.mu * self.x +\n np.sqrt(self.mu**2 * self.x**2 -\n self.x**2 + self.cell_xr**2))\n self.next_cell_index = self.cell_index + 1\n\n return l_edge", "def shortest_distance_to(self, pt):\n return self._nearest_to_point(pt)[0]", "def _get_distance_betweenitems(self, item_no1, item_no2):\n\n try:\n if item_no1 >= 0 and item_no2 >= 0:\n loc_current = self.page_current.item_onscreenlocs[item_no1]\n loc_potential = self.page_current.item_onscreenlocs[item_no2]\n distance = abs(loc_potential - loc_current)\n else:\n distance = 0\n\n except IndexError:\n distance = 0\n\n return distance", "def distance_to_current_waypoint():\n nextwaypoint = vehicle.commands.next\n if nextwaypoint == 0:\n return None\n missionitem = vehicle.commands[nextwaypoint -\n 1] #commands are zero indexed\n lat = missionitem.x\n lon = missionitem.y\n alt = missionitem.z\n targetWaypointLocation = LocationGlobalRelative(lat, lon, alt)\n distancetopoint = get_distance_metres(vehicle.location.global_frame,\n targetWaypointLocation)\n return distancetopoint", "def find_min(self):\n return self.min", "def find_min(self):\n return self.min", "def distancetoline(p, l1, l2):\n vx = l1.x-p.x \n vy = l1.y-p.y\n ux = l2.x-l1.x\n uy = l2.y-l1.y\n\n length = ux*ux+uy*uy;\n\n det = (-vx*ux)+(-vy*uy); \n # if this is < 0 or > length then its outside the line segment\n if det<0 or det>length:\n ux=l2.x-p.x\n uy=l2.y-p.y\n return sqrt(min(vx*vx+vy*vy, ux*ux+uy*uy))\n\n det = ux*vy-uy*vx\n if length == 0.0:\n return 0.0\n else:\n return sqrt((det*det)/length)", "def closest_point(self, l):\n cos = np.dot(self.direction, l.direction)\n n = 1 - cos ** 2\n if n < sys.float_info.epsilon:\n # Lines are parallel.\n return self.zero\n\n d0 = l.zero - self.zero\n a = np.dot(d0, self.direction)\n b = np.dot(d0, l.direction)\n return self.zero + self.direction * ( a - b * cos) / n", "def minimum_distance(object_1, object_2):\n\n # package import\n import numpy as np\n\n # main algorithm\n minimum_distance = 100000\n\n for coord_1 in object_1:\n for coord_2 in object_2:\n distance_btwn_coords = np.linalg.norm(coord_1 - coord_2)\n if distance_btwn_coords == 0:\n 
minimum_distance = distance_btwn_coords\n return float(minimum_distance)\n elif distance_btwn_coords < minimum_distance:\n minimum_distance = distance_btwn_coords\n\n return float(minimum_distance)", "def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2", "def __find_max_distance(self):\n return utils.find_max_distance(self.__game)", "def find_min(self):\n return min(self.nodes, key=int)", "def calculate_distance_line(\n r_packet, comov_nu, is_last_line, nu_line, time_explosion\n):\n\n nu = r_packet.nu\n\n if is_last_line:\n return MISS_DISTANCE\n\n nu_diff = comov_nu - nu_line\n\n # for numerical reasons, if line is too close, we set the distance to 0.\n if r_packet.is_close_line:\n nu_diff = 0.0\n r_packet.is_close_line = False\n\n if nu_diff >= 0:\n distance = (nu_diff / nu) * C_SPEED_OF_LIGHT * time_explosion\n else:\n print(\"WARNING: nu difference is less than 0.0\")\n raise MonteCarloException(\n \"nu difference is less than 0.0; for more\"\n \" information, see print statement beforehand\"\n )\n\n if numba_config.ENABLE_FULL_RELATIVITY:\n return calculate_distance_line_full_relativity(\n nu_line, nu, time_explosion, r_packet\n )\n return distance", "def _compute_smallest_step_len_for_candidate_vector(x_candidate, z_min):\n ta, tb = _solve_scalar_quadratic_equation(x_candidate, z_min)\n step_len = min([ta, tb], key=abs)\n\n return step_len", "def get_closest_node(data, loc):\n min_dist = None\n closest = None\n for i in data:\n # Standard min-value search loop\n dist = great_circle_distance(get_coords(data, i), loc)\n if closest is None or dist < min_dist:\n closest = i\n min_dist = dist\n return closest", "def extract_minOld(H):\n minDist = approxInf\n u = None\n for v in H:\n if v[1] <= minDist:\n minDist = v[1]\n u = v\n return(H.pop(u))", "def closest_waypoint(self, location: pylot.utils.Location):\n min_dist = np.infty\n min_index = 0\n for index, waypoint in enumerate(self.waypoints):\n dist = waypoint.location.distance(location)\n if dist < min_dist:\n min_dist = dist\n min_index = index\n return min_index", "def find_min(list):\n return find_value_at(list, -1)", "def smallest (self):\n return self.pointers[0].smallest()", "def find_middle_point(self):\n leaf1, longest_dist = None, 0.0\n for leaf in self.leaves:\n dist = sum(self.path_dists[leaf])\n if dist > longest_dist:\n leaf1 = leaf\n longest_dist = dist\n leaf2, longest_dist = None, 0.0\n for leaf in self.leaves:\n dist = self.node_distance(leaf1, leaf)\n if dist > longest_dist:\n leaf2 = leaf\n longest_dist = dist\n for ind, (n1, n2) in enumerate(zip(self.paths[leaf1], self.paths[leaf2])):\n if n1 != n2:\n break\n rev_ind = ind - len(self.paths[leaf1]) - 1\n nodes = self.paths[leaf1][-1:rev_ind-1:-1] + self.paths[leaf2][ind:]\n dists = self.path_dists[leaf1][-1:rev_ind:-1] + self.path_dists[leaf2][ind:]\n mid_dist, cur_dist = longest_dist / 2.0, 0.0\n for i in range(len(nodes)-1):\n dist = dists[i]\n if cur_dist + dist >= mid_dist:\n node1, node2 = nodes[i], nodes[i+1]\n if cur_dist + dist == mid_dist:\n distance = dist\n else:\n distance = mid_dist - cur_dist\n break\n else:\n cur_dist += dist\n return node1, 
node2, distance", "def nodeAtMinimumDistance(self, notFoundYet, distances):\n # found minimal\n minimal = None\n for node in notFoundYet:\n if (distances[node] >= 0): \n if minimal == None or (distances[minimal] > distances[node]):\n minimal = node\n\n # return\n if minimal == -1: return None\n else: return minimal", "def extractmin(self):\n if len(self.heap) == 0: \n return None\n i = self.heap[0]\n last = self.heap[-1]\n del self.heap[-1]\n if len(self.heap) > 0:\n self.siftdown(last, 0)\n return i", "def min(self):\n least = self.data[0]\n \n for i in range(len(self.data)):\n if self.data[i] < least:\n least = self.data[i]\n return least", "def distance(self) -> int:\n return 0", "def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))", "def nearest(items, pivot):\n return min(items, key=lambda x: abs(x - pivot))", "def DRFindMinimalEdge(self, flow, edge_lst):\n\t\tarr_time = flow[3]\n\t\tend_time = flow[3] + flow[4]\n\t\tmin_cum_size = float('inf')\n\t\tmin_edge = (-1, -1)\n\t\tfor e in edge_lst:\n\t\t\tprev_time = arr_time\n\t\t\tprev_rate = 0\n\t\t\tcum_size = 0\n\t\t\tfor time, rate in sorted(self.rate_lst[e].items()):\n\t\t\t\tif time > arr_time:\n\t\t\t\t\tif time < end_time:\n\t\t\t\t\t\tcum_size += prev_rate * (time - prev_time)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcum_size += prev_rate * (end_time - prev_time)\n\t\t\t\t\t\tbreak\n\t\t\t\tif cum_size >= min_cum_size:\n\t\t\t\t\t# Break if already >= minimum\n\t\t\t\t\tbreak\n\t\t\t\telif time >= end_time:\n\t\t\t\t\t# Break if time exceeds deadline\n\t\t\t\t\tbreak\n\n\t\t\t\tprev_time = time\n\t\t\t\tprev_rate = rate\n\n\t\t\tif cum_size < min_cum_size:\n\t\t\t\tmin_cum_size = cum_size\n\t\t\t\tmin_edge = e\n\n\t\treturn min_edge, min_cum_size", "def h(self,node):\n \"*** YOUR CODE HERE ***\"\n dist_arr = []\n for goal in self.goals:\n dist_arr.append(manhattan_distance_with_heading(node.state, goal))\n return min(dist_arr)", "def get_closest_slot(self):\n if not self.__available_slots__:\n return None\n return min(self.__available_slots__)", "def find_min_node(self):\n min_energy = 10 ** 10\n min_id = -1\n for node in self.node:\n if node.energy < min_energy:\n min_energy = node.energy\n min_id = node.id\n return min_id", "def get_nearest_row(self):\n return (self.rect.top - (self.screen.get_height() // 12)) // self.maze.block_size", "def compute_min_refills(distance: int, tank: int, stops: List[int]):\n location: int = 0\n n_stops = 0\n last_stop = 0\n max_drive = location + tank\n\n while max_drive < distance:\n counter = 0\n\n # Handle the case that stops are depleted before we reach distance\n if len(stops) == 0:\n return -1\n for s in stops:\n if s <= max_drive:\n counter += 1\n last_stop = s\n max_drive = last_stop + tank\n\n # Handle the case that wi did not reach the next stop\n if counter == 0:\n return -1\n else:\n del stops[0:counter]\n n_stops += 1\n\n return n_stops", "async def distance(self):\n return round(await self._rpc.distance(), 2)", "def _get_nearest_slot(self):\n available_slots = [pslot for pslot in self.slots.values() if pslot.available]\n if not available_slots:\n return None\n\n return sorted(available_slots, key=lambda x: x.slot_no)[0]", "def get_distance(self) -> int:\n return self.get_measurement_data().distance", "def min_diff_return(self, data_useable):\n\n min_diff = abs(data_useable[0][1] - data_useable[0][2])\n\n for data_row in data_useable:\n if abs(float(data_row[2]) - float(data_row[1])) < min_diff:\n min_diff = abs(float(data_row[2]) - 
float(data_row[1]))\n self.data = data_row\n\n return self.data", "def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )", "def find_closest_interior_node(arc, hull, nodes):\n lengths = [float(\"inf\") for x in range(len(nodes))]\n for (j, node) in enumerate(nodes):\n if not node.nid in hull:\n i = hull.index(arc[1])\n hull.insert(i, node.nid)\n lengths[j] = (tsputil.get_path_length(nodes, 100, hull))\n hull.pop(i)\n return lengths.index(min(lengths))", "def nearest(node):\n count = 0\n distance = 100000\n while count != node_count[0]:\n city = d_list[node.value - 1]\n if city != []:\n if city[0][1] < distance:\n distance = city[0][1]\n new_city = city[0][0]\n closest_city = node.value\n node = node.left\n count = count + 1\n return (closest_city, new_city, distance)", "def find_move_from_line(\n x,\n data,\n overlap_penalty,\n norm_penalty,\n offdiagonal_energy_penalty,\n lagrange_multiplier,\n energy_weights=None,\n max_norm_deviation=0.2,\n):\n N = np.abs(data[\"overlap\"].diagonal(axis1=1, axis2=2))\n Nij = np.asarray([np.sqrt(np.outer(a, a)) for a in N])\n nwf = data[\"energy\"].shape[-1]\n if energy_weights is None:\n energy_weights = np.ones(nwf) / nwf\n\n energy = data[\"energy\"] / Nij\n overlap = data[\"overlap\"]\n # print(\"energy cost\", np.sum(energy.diagonal(axis1=1,axis2=2),axis=1))\n # print(\"overlap cost\",np.sum(np.triu(overlap**2,1),axis=(1,2)) )\n # print(\"offdiagonal_energy\", energy)\n # print(\"norm\",np.einsum('ijj->i', (overlap-1)**2 ))\n cost = (\n np.einsum(\"i,nii->n\", energy_weights, energy)\n + overlap_penalty * np.sum(np.triu(overlap**2, 1), axis=(1, 2))\n + np.sum(lagrange_multiplier * np.triu(overlap, 1), axis=(1, 2))\n + offdiagonal_energy_penalty * np.sum(np.triu(energy**2, 1), axis=(1, 2))\n + norm_penalty * np.einsum(\"ijj->i\", (overlap - 1) ** 2)\n )\n\n # good_norms = np.prod(np.einsum('ijj->ij',np.abs(overlap-1) < max_norm_deviation),axis=1)\n # print(\"good norms\", good_norms, 'cost', cost[good_norms])\n xmin = linemin.stable_fit(x, cost)\n return xmin, cost", "def new_distance(self, prev_len, prev, sol, i, l):\n swapped1 = i\n swapped2 = l\n if swapped2 == self.file_size-1:\n remove = self.dist_matrix[prev[swapped1 - 1]][prev[swapped1]] \\\n + self.dist_matrix[prev[swapped2]][prev[0]]\n add = self.dist_matrix[sol[swapped1 - 1]][sol[swapped1]] + self.dist_matrix[sol[swapped2]][sol[0]]\n attempt = prev_len - remove + add\n else:\n remove = self.dist_matrix[prev[swapped1 - 1]][prev[swapped1]] \\\n + self.dist_matrix[prev[swapped2+1]][prev[swapped2]]\n add = self.dist_matrix[sol[swapped1 - 1]][sol[swapped1]] + self.dist_matrix[sol[swapped2+1]][sol[swapped2]]\n attempt = prev_len - remove + add\n return attempt", "def _get_max_dist_from_tail(self, snake, board, food_evaluation):\n\n if snake.health_points == self.MAX_HEALTH:\n return 1000\n elif food_evaluation == CANT_FIND:\n return 2\n else:\n return 2 + (self.MAX_HEALTH - snake.health_points) / (\n self.MAX_HEALTH) * (board.width + board.height)", "def get_min_distance(distances, unvisited_nodes):\n min_value = None\n node = None\n for city, distance in distances.items():\n if city not in unvisited_nodes:\n continue\n if min_value is None:\n node = city\n min_value = distance\n elif distance < min_value:\n node = city\n min_value = distance\n return node", "def getnearest(iterable, value):\n return min(enumerate(iterable), key=lambda i: abs(i[1] - value))", "def smallest_ellapsed(login):\n df = login\n df[\"Time\"] = 
pd.to_datetime(df[\"Time\"])\n\n\n return df.groupby(\"Login Id\").agg(lambda group: group.diff().min()).dropna()[\"Time\"].to_frame()", "def find_min(self):\n\n if self.left:\n return self.left.find_min()\n\n return self.data", "def findMin(self):\n curr = self\n while curr.hasLeftChild():\n curr = curr.leftChild\n return curr", "def _get_closest(self, x, y, clients):\n target = min(\n clients,\n key=lambda c: math.hypot(c.x - x, c.y - y),\n default=self.clients.current_client,\n )\n return target", "def calc_dist(L, Seff):\n return (L / Seff)**0.5", "def derivative_of_dist_to_obstacle(min_dist, x_jnt0, y_jnt0, x_jnt1, y_jnt1,\n dx_jnt0, dy_jnt0, dx_jnt1, dy_jnt1,\n link_slope, x_obs, y_obs):\n dist, point_type = min_dist\n if point_type == 0:\n dist_der = ((x_jnt0 - x_obs) * dx_jnt0 + (y_jnt0 - y_obs) * dy_jnt0)\n dist_der /= dist\n elif point_type == 1:\n dist_der = ((x_jnt1 - x_obs) * dx_jnt1 + (y_jnt1 - y_obs) * dy_jnt1)\n dist_der /= dist\n elif point_type == 2:\n if link_slope is None:\n dist_der = dx_jnt0 if x_jnt0 > x_obs else -dx_jnt0\n elif link_slope == 0:\n dist_der = dy_jnt0 if y_jnt0 > y_obs else -dy_jnt0\n else:\n x_intersect = (\n x_obs / link_slope + y_obs + link_slope * x_jnt0 - y_jnt0\n ) / (link_slope + 1 / link_slope)\n y_intersect = link_slope * (x_intersect - x_jnt0) + y_jnt0\n dlink_slope = (\n (1 / (x_jnt1 - x_jnt0))\n * (dy_jnt1 - dy_jnt0 + link_slope * (dx_jnt1 - dx_jnt0))\n )\n dx_intersect = (\n link_slope**4 * dx_jnt0\n + link_slope**2 * dlink_slope * (y_jnt0 - y_obs)\n - link_slope**3 * dy_jnt0\n + dlink_slope * (y_obs - y_jnt0)\n + 2 * link_slope * dlink_slope * (x_jnt0 - x_obs)\n + link_slope**2 * dx_jnt0\n - link_slope * dy_jnt0\n ) / (1 + link_slope**2) ** 2\n dy_intersect = (link_slope * (dx_intersect - dx_jnt0)\n + dlink_slope * (x_intersect - x_jnt0)\n + dy_jnt0)\n dist_der = (\n (x_intersect - x_obs) * dx_intersect\n + (y_intersect - y_obs) * dy_intersect\n ) / dist\n return dist_der", "def closest_fruit(maze, currX, currY, fruit_list):\n curr_min = sys.maxsize\n for position in fruit_list:\n distance = Astar(maze, currX, currY, position[0], position[1])\n if distance < curr_min:\n curr_min = distance\n return curr_min", "def displacement(self):\n return self[0].distance(self[-1])", "def distance(self, p=None, l=None):\n if l is None:\n d = p - self.zero\n n = np.zeros(3)\n # try:\n # n = d - np.dot(d, self.direction) * self.direction\n # except RuntimeWarning:\n # print(d, self.direction)\n # return norm(n)\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n n = d - np.dot(d, self.direction) * self.direction\n # print(n, norm(n))\n if len(w) > 0 and issubclass(w[-1].category, RuntimeWarning):\n # Todo: check w/ Ram if this is what he meant to do when catch a warning: n = np.zeros(3)\n # n = np.zeros(3)\n # print(d, self.direction)\n pass\n return norm(n)\n else:\n normal = np.cross(self.direction, l.direction)\n n = norm(normal)\n if n < sys.float_info.min:\n # Lines are parallel.\n return self.distance(p=l.zero)\n offset = np.dot(l.zero - self.zero, normal) / n\n return np.abs(offset)", "def get_head_distance(head_pixels, frame_shape):\n head_center = ((head_pixels[0] + head_pixels[2]) / 2 / frame_shape[1],\n (head_pixels[1] + head_pixels[3]) / 2 / frame_shape[0])\n center = (0.5, 0.5)\n return np.linalg.norm(np.subtract(head_center, center))", "def _get_distance_diff(self, input):\n nbatch = input.shape[0]\n in1 = input.unsqueeze(1).expand(\n nbatch, self.nelec, 
self.nelec, self.ndim)\n in2 = input.unsqueeze(2).expand(\n nbatch, self.nelec, self.nelec, self.ndim)\n dist = torch.pow(in1 - in2, 2).sum(3)\n return dist", "def item_t(data_alt, item, min_t):\n for t in data_alt[item]:\n if t >= min_t:\n return t\n return None", "def get_min(h: Heap) -> Node:\n prev, curr = _min(h)\n return curr", "def takeClosest(myList, myNumber):\n pos = bisect_left(myList, myNumber)\n if pos == 0:\n return 0 #myList[0]\n if pos == len(myList):\n return len(myList)-1 #myList[-1]\n\n before = myList[pos - 1]\n after = myList[pos]\n\n if after - myNumber < myNumber - before:\n return pos #after\n else:\n return pos-1 #before", "def get_closest_waypoint(self, pose):\n wpclosestDist = sys.maxint\n for index in range(len(self.waypoints.waypoints)):\n wp = self.waypoints.waypoints[index]\n wpdist = self.calcDistance_PoseStamped(pose, wp.pose)\n if(wpclosestDist > wpdist):\n wpclosestDist = wpdist\n wpindex = index\n return wpindex", "def left_distance(self):\n return self.x", "def lmin(scape, start):\n i = start\n while scape[i - 1] < scape[i] - 0.06:\n i -= 1\n while scape[i + 1] < scape[i] - 0.06:\n i += 1\n return i", "def min(self):\n return self.get_first()", "def get_distance_between_checkpoint(self, i1, i2):\n ma = max(i1, i2)\n mi = min(i1, i2)\n dist = 0\n while mi != ma:\n dist += get_distance(self.cp[mi], self.cp[mi + 1])\n mi += 1\n return dist", "def closest(start, incoming_angle, timeleft):\n visited = set()\n frontier = [ (0, 0, 0, incoming_angle, start) ]\n distances = {}\n while frontier:\n (cost, difficulty, count, in_angle, n) = heappop(frontier)\n if n in visited:\n continue\n distances[n] = cost\n if cost > timeleft:\n # cannot reach a non visited edge on time\n return None\n edges = sorted(n.edges, key=priority(in_angle))\n for edge in edges:\n if cost + edge.cost <= timeleft:\n # we can take this edge\n if edge.distance > 0:\n return compute_path(distances, cost, start, n) + [ edge ]\n else:\n if edge.stop not in visited:\n difficulty = max(e2.difficulty for e2 in edge.stop.edges)\n candidate = (cost + edge.cost, difficulty, count + edge.visits, edge.angle, edge.stop)\n # print candidate\n heappush(frontier, candidate)\n visited.add(n)\n return None", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def min_time(self):\n return self._ll_tree_sequence.get_min_time()", "def find_minimum_path_cost(cls, start_point: Coordination, current_point: Coordination) -> int:\n return abs(start_point.x - current_point.x) + abs(start_point.y - current_point.y)", "def diameter(self):\n\n v = self.vertices()\n pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]\n smallest_paths = []\n for (s,e) in pairs:\n paths = self.find_all_path(s,e)\n smallest = sorted(paths, key=len)[0]\n smallest_paths.append(smallest)\n\n smallest_paths.sort(key=len)\n\n # Print the list smallest_paths\n\n # Longest path is at the end of list\n # ie diameter corresponds to length of this path\n\n diameter = len(smallest_paths[-1]) -1\n return diameter", "def distance(self, other_pt, is_lla=True):\n return 0.0" ]
[ "0.64288473", "0.6343981", "0.629061", "0.6034535", "0.602224", "0.6003383", "0.5981095", "0.593175", "0.5914017", "0.5857995", "0.5832663", "0.5826591", "0.5805714", "0.5800851", "0.57828003", "0.5737947", "0.5734673", "0.56870914", "0.56647193", "0.56186366", "0.55836093", "0.5577574", "0.55390465", "0.55263233", "0.55238676", "0.5523338", "0.55123746", "0.5509846", "0.5486163", "0.5485987", "0.5485585", "0.54688555", "0.5467318", "0.546515", "0.546386", "0.5456844", "0.5456844", "0.54343045", "0.5405624", "0.54034925", "0.5402656", "0.5395097", "0.53858435", "0.5382595", "0.5381849", "0.53782016", "0.53654057", "0.5361178", "0.5360914", "0.5356503", "0.5355428", "0.5354138", "0.53481853", "0.5335629", "0.5325433", "0.53233004", "0.5323206", "0.53122133", "0.53114223", "0.5307121", "0.53050417", "0.53039086", "0.52988684", "0.52892566", "0.52871585", "0.5279777", "0.5271625", "0.5271201", "0.5271162", "0.5268719", "0.5267176", "0.5264495", "0.52634966", "0.52621424", "0.52610207", "0.5253197", "0.5251721", "0.52439606", "0.5237188", "0.5236815", "0.5232567", "0.5229691", "0.5229025", "0.5225606", "0.5222194", "0.52181876", "0.5216585", "0.52059674", "0.5202546", "0.52024794", "0.51999027", "0.5197873", "0.5197725", "0.5196957", "0.5196146", "0.5194552", "0.5191597", "0.5189709", "0.51890904", "0.51872677" ]
0.5248442
77
Load data from CSV files and return them as numpy arrays The use_labels parameter indicates whether one should read the first column (containing class labels). If false, return all 0s.
def load_data(filename, use_labels=True):
    # load column 1 to 8 (ignore last one)
    data = np.loadtxt(open(filename), delimiter=',',
                      usecols=range(1, 9), skiprows=1)
    if use_labels:
        labels = np.loadtxt(open(filename), delimiter=',',
                            usecols=[0], skiprows=1)
    else:
        labels = np.zeros(data.shape[0])
    return labels, data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self, path=\"../data_set/final-train-dataset.csv\", shuffle=False,\n onlyLabelToUse=None, useOnlyBestIndicators=False, binary=False):\n data = []\n labels = []\n\n with open(path) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n skip = True\n\n for row in reader:\n if skip:\n skip = False\n continue\n\n entries = []\n\n for i in range(SAMPLES_OF_DATA_TO_LOOK_AT):\n # Get one time point's data, including indicators:\n if useOnlyBestIndicators:\n # entries.append(\n # [float(row[i + j * SAMPLES_OF_DATA_TO_LOOK_AT]) for\n #\n entries.append(\n [float(row[i + j * SAMPLES_OF_DATA_TO_LOOK_AT]) for\n j in [0, 1, 2, 5, 7]])\n else:\n entries.append(\n [float(row[i + j * SAMPLES_OF_DATA_TO_LOOK_AT]) for\n j in range(INPUT_CHANNELS)])\n\n data.append(np.array(entries))\n\n if onlyLabelToUse is not None:\n if binary:\n label = [int(float(row[SAMPLES_OF_DATA_TO_LOOK_AT * TOTAL_INPUTS_IN_DATASET + onlyLabelToUse]) > 0.5)]\n else:\n label = [float(row[SAMPLES_OF_DATA_TO_LOOK_AT * TOTAL_INPUTS_IN_DATASET + onlyLabelToUse])]\n else:\n if binary:\n label = [int(float(row[SAMPLES_OF_DATA_TO_LOOK_AT * TOTAL_INPUTS_IN_DATASET + j]) > 0.5) for j in range(OUTPUT_CHANNELS)]\n else:\n label = [float(row[SAMPLES_OF_DATA_TO_LOOK_AT * TOTAL_INPUTS_IN_DATASET + j]) for j in range(OUTPUT_CHANNELS)]\n\n labels.append(label)\n\n if shuffle:\n indices = [i for i in range(len(data))]\n np.random.shuffle(indices)\n labels = np.array([labels[i] for i in indices])\n data = np.array([data[i] for i in indices])\n else:\n data = np.array(data)\n labels = np.array(labels)\n\n return data, labels", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype = str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0' \n y[y==label1]='1' \n y[y==label2]='2'\n y.astype(np.float) \n return X, y", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype=str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0'\n y[y==label1]='1'\n y[y==label2]='2'\n y=y.astype(np.float)\n return X, y", "def load_labeled_data(files):\n\tx = []\n\ty = []\n\tfor filename in files:\n\t\tdata = []\n\t\twith open(filename) as infile:\n\t\t\tlabel = int(infile.readline())\n\t\t\tfor line in infile:\t\n\t\t\t\tdata.append(dna_string_to_array(line.strip()))\n\t\ty += [label]*len(data)\n\t\tx += data\n\n\treturn (np.array(x), np.array(y))", "def load_data_and_labels(data_file, labels_file):\r\n x_text = []\r\n y = []\r\n \r\n with open(data_file, encoding = \"utf-8\") as csvFile:\r\n readCSV = csv.reader(csvFile, delimiter = \",\")\r\n for row in readCSV:\r\n row = \"\".join(row)\r\n x_text.append(row) \r\n \r\n with open(labels_file, encoding = \"utf-8\") as csvFile2:\r\n readCSV = csv.reader(csvFile2, delimiter = \",\")\r\n for row in readCSV:\r\n d = defaultdict(list)\r\n for k,va in [(v,i) for i,v in enumerate(row)]:\r\n d[k].append(va)\r\n \r\n for k in range(len(d.get(\"1.0\"))):\r\n index = d.get(\"1.0\")[k]\r\n row[index] = 1\r\n for k in range(len(d.get(\"0.0\"))):\r\n index = d.get(\"0.0\")[k]\r\n row[index] = 0\r\n \r\n# print(len(row))\r\n y.append(row)\r\n \r\n\r\n\r\n\r\n \r\n print(\"x = {}\".format(len(x_text)))\r\n print(\"y = {}\".format(len(y)))\r\n \r\n return x_text, y", "def load_csv_data(data_path):\n print(\"LOADING CSV FILE FROM {}\".format(data_path))\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=[1])\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n ids = x[:, 0].astype(np.int)\n 
input_data = x[:, 2:]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y == 'b')] = -1\n\n return yb, input_data, ids", "def load_csv(fname = data_indoor):\n \n reader = csv.reader(open(fname, 'r'))\n \n # Blank list\n data = []\n \n # Don't read the zeroth element of each row (image name), convert to float.\n for row in reader:\n data.append(map(float, row[1:]))\n \n # Convert list to array \n d = np.array(data)\n \n # Seperate labels from features\n Y = d[:,0]\n X = d[:,1:]\n \n return X,Y", "def loader(filename,sep=',',rowskip=[], colskip=[], axis=1,names=1,fromstring=0):\n\n #manages excpetions to the csv file incase of missing data\n if (type(filename)==str) and (fromstring==1):\n iterable=filename.strip('\\n').split('\\n')\n content=np.array([i for i in csv.reader(iterable,delimiter=sep)])\n elif type(filename)==np.ndarray:\n content=filename\n else:\n content=np.array([i for i in\\\n csv.reader(open(filename,'r'),delimiter=sep)])\n #content=np.genfromtxt(filename,delimiter=sep,dtype=str)\n\n if rowskip:\n #rowskip.sort(reverse=True)\n content=np.delete(content,rowskip,0)\n #for i in rowskip: content.pop(i)\n\n if colskip:\n #colskip.sort(reverse=True)\n content=np.delete(content,colskip,1)\n #for i in colskip: content.pop(i)\n\n if axis==0: # if the file oriented column-wise\n #content=list(map(list,zip(*content)))\n content=content.T\n\n\n\n if names is 0:\n variables=np.arange(content.shape[1]).tolist()\n offset=0\n else:\n variables=content[0].tolist()\n offset=1\n\n try:\n content=np.array([conv_col(col) for col in\n content[offset:].T],dtype='object')\n arity=np.array([np.unique(i).size for i in content])\n return dataset(variables,content.T,arity)\n except ValueError: \n print( 'Data could not be loaded, failed converting to float.')\n return content", "def load_dataset(csv_path, label_col='y', add_intercept=False):\n\n def add_intercept_fn(x):\n global add_intercept\n return add_intercept(x)\n\n # Validate label_col argument\n allowed_label_cols = ('y', 't')\n if label_col not in allowed_label_cols:\n raise ValueError('Invalid label_col: {} (expected {})'\n .format(label_col, allowed_label_cols))\n\n # Load headers\n with open(csv_path, 'r') as csv_fh:\n headers = csv_fh.readline().strip().split(',')\n\n # Load features and labels\n x_cols = [i for i in range(len(headers)) if headers[i].startswith('x')]\n l_cols = [i for i in range(len(headers)) if headers[i] == label_col]\n inputs = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=x_cols)\n labels = np.loadtxt(csv_path, delimiter=',', skiprows=1, usecols=l_cols)\n\n if inputs.ndim == 1:\n inputs = np.expand_dims(inputs, -1)\n\n if add_intercept:\n inputs = add_intercept_fn(inputs)\n\n return inputs, labels", "def load_data(fl=\"data.csv\"):\n data = np.loadtxt(fl, delimiter=\",\")\n y1 = data[:, 0]\n y2 = data[:, 1]\n return y1, y2", "def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), axis=1)\n\n scaler = 
StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return train_dataset, train_labels, test_dataset", "def load_data(self):\n\n data_pd = pd.read_csv(self.filename)\n return np.array(data_pd)", "def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features", "def load_csv_data(data_path, sub_sample=False):\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=1)\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n labels = open(data_path,'r').readline()\n\n\n ids = x[:, 0].astype(np.int)\n input_data = x[:, 2:]\n labels = labels.strip().split(\",\")\n del labels[0]\n del labels[0]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y == 'b')] = -1\n\n # sub-sample\n if sub_sample:\n yb = yb[::50]\n input_data = input_data[::50]\n ids = ids[::50]\n\n return yb, input_data, ids, labels", "def load_data_and_labels():\n # Load data from files\n positive_examples = []\n for file in os.listdir('with_datarace'):\n filename = os.fsdecode(file)\n ast_file = open('with_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n positive_examples.append(token_vector)\n file_names.append(filename)\n\n negative_examples = []\n for file in os.listdir('without_datarace\\\\'):\n filename = os.fsdecode(file)\n ast_file = open('without_datarace\\\\' + filename, 'r')\n token_vector = ast_file.read()\n negative_examples.append(token_vector) # List of lists\n file_names.append(filename)\n\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = [s.strip() for s in negative_examples]\n\n # Split by words\n x_text = positive_examples + negative_examples # why we didn't cobine it from the beginning?\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n\n return [x_text, y]", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def loadCSVSeeds(self, csvFilePath):\n labels = []\n with open(csvFilePath) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row 
in reader:\n labels.append([row[0], row[1], [float(row[2]), float(row[3]), float(row[4]) ]])\n print(csvFilePath + \": labels loaded\")\n return labels", "def read_labels(labels_path):\n with open(labels_path, 'r') as file:\n data = file.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n return data", "def read_label(filepath, read_scalars=False):\n label_array = np.loadtxt(filepath, dtype=np.int, skiprows=2, usecols=[0])\n if read_scalars:\n scalar_array = np.loadtxt(filepath, skiprows=2, usecols=[-1])\n return label_array, scalar_array\n return label_array", "def load_train_data():\n\n # Load X_train\n with open('X_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TRAIN_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_train = np.array(feature_string_matrix)\n # Load Y_train\n with open('y_train.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n y_string = []\n for row in reader:\n y_value = [float(row['y'])]\n y_string.append(y_value)\n y_train = np.array(y_string)\n return X_train, y_train", "def loadTestData():\n path = raw_input(\"Enter the path of Test Data: \")\n data = np.genfromtxt(path, delimiter=',', dtype=int)\n\n labels = data[:, -1]\n\n unwantedLabels = [4, 5, 6, 7, 8, 9]\n listToDelete = []\n for i, line in enumerate(range(len(data))):\n if labels[i] in unwantedLabels:\n listToDelete.append(i)\n\n actualData = np.delete(data, listToDelete, axis=0)\n\n # print(actualData.shape)\n # Separating the labels and data into different arrays\n actualLabels = actualData[:, -1]\n actualData = actualData[:, :-1]\n\n actualData = pre.scale(actualData)\n\n # Change the label vector to label matrix\n # If Label is 2 then it becomes [0, 1, 0]\n labelMatrix = np.zeros((actualLabels.shape[0], 4))\n for j in range(len(actualLabels)):\n if actualLabels[j] == 0:\n labelMatrix[j][0] = 1\n if actualLabels[j] == 1:\n labelMatrix[j][1] = 1\n if actualLabels[j] == 2:\n labelMatrix[j][2] = 1\n if actualLabels[j] == 3:\n labelMatrix[j][3] = 1\n\n return actualData, actualLabels", "def load_csv(data_file_path, class_index=-1):\n\n handle = open(data_file_path, 'r')\n contents = handle.read()\n handle.close()\n rows = contents.split('\\n')\n out = np.array([[float(i) for i in r.split(',')] for r in rows if r])\n\n if class_index == -1:\n classes = map(int, out[:, class_index])\n features = out[:, :class_index]\n return features, classes\n\n elif class_index == 0:\n classes = map(int, out[:, class_index])\n features = out[:, 1:]\n return features, classes\n\n else:\n return out", "def load_data(csv_filename):\n data = np.genfromtxt(csv_filename, delimiter=\";\", skip_header=1, usecols=range(11))\n return data", "def load_samples_and_labels(data_path, header=True, col=1, train=True):\n if header:\n start_index = 1\n else:\n start_index = 0\n\n with open(data_path, 'r', encoding='utf-8') as f:\n lines = f.read().splitlines()[start_index:]\n samples = [line.split(',')[col] for line in lines]\n samples = [sample.split() for sample in samples]\n\n if train:\n labels = [int(line.split(',')[3]) for line in lines]\n else:\n labels = []\n\n return samples, labels", "def _read_labels_csv_file(self, csv_file_path, image_file_paths):\n\n self.__logger.debug('[Get Labels]')\n self.__logger.debug('Read CSV Labels ( %s ) 
...' % csv_file_path)\n\n image_file_names = self.get_file_names_from_file_paths(file_paths=image_file_paths)\n\n labels = []\n\n with open(csv_file_path, newline='') as csvfile:\n read_image_files = 0 # numbers of image files read\n rows = csv.reader(csvfile)\n\n for row in rows:\n file_name = row[0]\n # make file name from '00030183_004.png' to '00030183_004'\n file_name = file_name.split('.')\n file_name = file_name[0]\n\n # if csv file name matches image file name, the label of the former will be stored in labels (list)\n if file_name == image_file_names[read_image_files]: # image_file_name has to remove str '.jpg'\n label = row[1].split('|')\n label_id = []\n for i in range(len(label)):\n label_id.append(Xray_class_id[label[i]])\n labels.append(label_id) # store the label\n\n read_image_files += 1\n if read_image_files == len(image_file_names): # if numbers of image files read equals numbers of\n # batch images, then break\n break\n\n self.__logger.debug('Done !')\n\n return labels", "def get_labels_df():\n labels_df = pd.read_csv('data/train/truth_train.csv', header=None)\n return labels_df", "def read_csv(path_to_file):\n position = []\n classification = []\n with open(path_to_file, 'r') as csv_file:\n reader = csv.reader(csv_file)\n next(reader, None) # skip the header\n\n for row in reader:\n position.append(np.array([float(row[0]), float(row[1])]))\n classification.append(float(row[2]))\n\n return np.array(position), np.array(classification, dtype='uint8')", "def load_csv_data(data_path, sub_sample=False):\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=1)\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n ids = x[:, 0].astype(np.int)\n input_data = x[:, 2:]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y=='b')] = -1\n \n # sub-sample\n if sub_sample:\n yb = yb[::50]\n input_data = input_data[::50]\n ids = ids[::50]\n\n return yb, input_data, ids", "def load_csv_data(data_path, sub_sample=False):\n y = np.genfromtxt(data_path, delimiter=\",\", skip_header=1, dtype=str, usecols=1)\n x = np.genfromtxt(data_path, delimiter=\",\", skip_header=1)\n ids = x[:, 0].astype(np.int)\n input_data = x[:, 2:]\n\n # convert class labels from strings to binary (-1,1)\n yb = np.ones(len(y))\n yb[np.where(y=='b')] = -1\n \n # sub-sample\n if sub_sample:\n yb = yb[::50]\n input_data = input_data[::50]\n ids = ids[::50]\n\n return yb, input_data, ids", "def load_data(fname, skip_header=0, delimiter=','):\n\n data = np.genfromtxt(fname, dtype=str, comments=None, delimiter=delimiter, skip_header=skip_header)\n\n pathes = data[:, 0]\n labels = data[:, 1]\n\n return pathes, labels", "def load_labels(label_file) :\n df = pd.read_csv(label_file, index_col=\"p_index\",\n dtype=str, na_values=['nan', 'NaN', '']).dropna()\n\n return df", "def load_test(data_file, labels=True, skip_rows=0):\n dat = iter_loadtxt(data_file, delimiter='\\t', skiprows=skip_rows)\n if labels:\n Dat = DataSet(images=dat[:, 0:-1], labels=dat[:, -1].astype(int), reshape=False)\n else:\n nrow = dat.shape[0]\n Dat = DataSet(images=dat, labels=np.full(nrow, np.nan), reshape=False)\n return Dat", "def matlab_csv_to_teacher_data(dirname):\n samples = np.genfromtxt(os.path.join(dirname, 'samples.csv'), dtype=float,\n delimiter=\",\")\n labels = np.genfromtxt(os.path.join(dirname, 'labels.csv'), dtype=int,\n delimiter=\",\")\n data = [None]*max(labels) # matlab is 1-indexed, so no need to add 1\n for i, z in enumerate(labels):\n if data[z-1] is 
None:\n data[z-1] = np.copy(samples[i, :])\n else:\n data[z-1] = np.vstack((data[z-1], np.copy(samples[i, :])))\n return data", "def load_data(self):\n print('Loading {} dataset'.format(self.split))\n data_split_path = os.path.join(self.root_dir, 'splits', '{}.csv'.format(self.split))\n with open(data_split_path,'r') as f:\n reader = csv.reader(f, delimiter=',')\n data_classes = {}\n for i,row in enumerate(reader):\n if i==0:\n continue\n data_classes[row[1]] = 1\n data_classes = data_classes.keys()\n print(data_classes)\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n im_dir = os.path.join(self.root_dir, 'data/{}/'.format(self.split), cls)\n im_files = sorted(glob.glob(os.path.join(im_dir, '*.jpg')))\n np.random.RandomState(self.seed).shuffle(im_files) # fix the seed to keep label,unlabel fixed\n for j, im_file in enumerate(im_files):\n im = np.array(Image.open(im_file).resize((self.im_width, self.im_height)), \n np.float32, copy=False)\n if j<self.n_label:\n dataset_l[i, j] = im\n else:\n dataset_u[i,j-self.n_label] = im\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes", "def get_data():\n\n pathxtrain = sys.argv[1]\n pathxtest = sys.argv[2]\n pathlabeltrain = sys.argv[3]\n pathlabeltest = sys.argv[4]\n\n xtrain = p.read_csv(pathxtrain, header=None)\n xtest = p.read_csv(pathxtest, header=None)\n label_train = p.read_csv(pathlabeltrain, header=None)\n label_test = p.read_csv(pathlabeltest, header=None)\n\n xtrain_mx = xtrain.values\n xtest_mx = xtest.values\n\n label_train = label_train.values.reshape(label_train.shape[0])\n label_test = label_test.values.reshape(label_test.shape[0])\n\n return xtrain_mx, xtest_mx, label_train, label_test", "def load_data(filepath):\n data = import_csv(filepath, has_headers=False)\n x_data = data[:, 0:3]\n y_data = None\n if data.shape[1]>3:\n y_data = data[:, 3:]\n n_data = data.shape[0]\n\n return n_data, np.float64(x_data), np.float64(y_data)", "def load_csv(fichero):\r\n data = np.loadtxt(fichero, delimiter=',')\r\n X = data[:,:-1]\r\n y = data[:,-1]\r\n return X, y", "def load_file(file_name) -> np.ndarray:\r\n reader = csv.reader(open(file_name, \"r\"), delimiter=',')\r\n x_rdr = list(reader)\r\n return np.array(x_rdr).astype('float')", "def load_train_y(train_y_path):\n \n text = open(train_y_path, 'r')\n row = csv.reader(text)\n y = []\n n_row = 0\n for r in row:\n if n_row != 0:\n y.append(float(r[0]))\n n_row += 1\n text.close()\n y = np.array(y)\n \n return y", "def read(filename):\n records = Parser.__load_csv(filename)\n return np.array(records)", "def load_data_from_csv(f_name):\n data = []\n f = open(f_name, \"r\")\n reader = csv.reader(f,delimiter=\",\")\n for row in reader:\n data.append([float(i) for i in row])\n f.close()\n data = np.array(data)\n x = data[0,:]\n data = data[1:,:].swapaxes(0,1)\n return x, data", "def load_data():\n x = np.genfromtxt(X_FILE, usecols=(0, 1))\n y = np.genfromtxt(Y_FILE, usecols=(0))\n\n return x, y", "def loadData (x_file=\"../ass1_data/logisticX.csv\", 
y_file=\"../logisticY.csv\"):\n\n X = np.genfromtxt(x_file, delimiter=',')\n Y = np.genfromtxt(y_file, dtype=int)\n\n return (X, Y)", "def import_data(path, num_examples):\n data = np.empty((num_examples, 5), dtype=\"float128\")\n y = np.empty((num_examples, 1), dtype=\"float128\")\n\n with open(path, 'r') as f:\n i = 0\n for line in f:\n example = []\n terms = line.strip().split(',')\n for j in range(len(terms)):\n if j == 4:\n y[i] = 2 * float(terms[j]) - 1\n else:\n example.append(float(terms[j]))\n data[i, 1:] = example\n data[i, 0] = 1\n i += 1\n\n data = normalize(np.asmatrix(data), axis=0)\n return [data, np.asmatrix(y)]", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def load_data(self, features=None, labels=None):\n if features is None or labels is None:\n self._features = None\n self._labels = None\n return\n if len(features) != len(labels):\n raise DataMismatchError('Features and labels lists are different lengths')\n try:\n self._features = np.array(features, dtype=float)\n self._labels = np.array(labels, dtype=float)\n except ValueError:\n self._features = None\n self._labels = None\n raise ValueError('Label and feature lists must be homogeneous (same data type)'\n 'and numeric (i.e integers and floats) list of lists')", "def read_data():\n csv_data = pd.read_csv('./dataset.csv')\n x = csv_data[['X1', 'X2']]\n x = x.values # numpy array for x: (180, 2)\n y = csv_data['Label']\n y = y.values # numpy array for y: (180, )\n\n\t# shuffle the data\n total = x.shape[0]\n mask = list(range(total))\n np.random.shuffle(mask)\n x = x[mask]\n y = y[mask]\n\t\n\t# 80 percent for train and 20 percent for test\n train_split = int(0.8 * total)\n x_train, y_train = x[:train_split], y[:train_split]\n x_test, y_test = x[train_split:], y[train_split:]\n return x_train, y_train, x_test, y_test", "def load_datasets(folder_path, glob_filter=\"*.csv\", labels_last_column=True, labels_filename=False, custom_func=None, **kwargs):\n # Convert the folder_path to a Path if needed\n if isinstance(folder_path, str):\n folder_path = Path(folder_path)\n elif isinstance(folder_path, Path):\n pass\n else:\n raise TypeError(f\"{type(folder_path)} is not a valid type for the folder_path\")\n # Check that the directory exists\n if not folder_path.is_dir():\n raise ValueError(f\"{folder_path} is not a directory\")\n # If both labels arguments are true, raise error\n if labels_last_column and labels_filename:\n raise ValueError(f\"labels_last_column and labels_filename cannot both be True\")\n # or if both are False\n elif not (labels_last_column or labels_filename):\n raise ValueError(f\"labels_last_column and labels_filename cannot both be False\")\n # Get the files according to the filter provided\n files = list(folder_path.glob(glob_filter))\n # Sort them files (avoids needing leading 0s)\n # https://stackoverflow.com/a/36202926/9963224\n files.sort(key=lambda var: [int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', str(var))])\n # If no files are found, raise error\n if not files:\n raise ValueError(f\"{folder_path} with {glob_filter} filter had no results\")\n # Run the custom function if provided\n if custom_func is not None:\n filenames, datasets, label_sets = custom_func(files)\n else:\n # Initialize containers\n filenames = []\n datasets = []\n label_sets = []\n # Loop through the files\n for file in files:\n filenames.append(file.stem)\n # If the labels are in separate files, load 
them\n if labels_filename:\n if \"label\" in file:\n label_sets.append(np.loadtxt(file, **kwargs))\n else:\n datasets.append(np.loadtxt(file, **kwargs))\n # Otherwise the labels are in the last column\n elif labels_last_column:\n # Load the data\n data = np.loadtxt(file, **kwargs)\n # Add the datasets and labels\n label_sets.append(data[:, -1].astype(int))\n datasets.append(data[:, :-1])\n # Return the filenames, datasets, and labels\n return filenames, datasets, label_sets", "def read_random_data_from_csv(\n file_name, training_set_size, unlabeled_set_size, holdout_set_size, validation_set_size):\n data = samp_file_to_arr(\n file_name, training_set_size + unlabeled_set_size + holdout_set_size + validation_set_size)\n y_raw = np.array([x[0] for x in data])\n x_all = np.array([x[1:] for x in data])\n # Now transform so that the lower label is -1, always. \n uq = np.unique(y_raw) # Assumed to be only two unique labels!\n y_all = np.zeros(len(y_raw))\n y_all[np.where(y_raw == uq[0])[0]] = -1\n y_all[np.where(y_raw == uq[1])[0]] = 1\n xtrhoval, x_unl, ytrhoval, y_unl = sklearn.model_selection.train_test_split(\n x_all, y_all, test_size=unlabeled_set_size)\n x_trho, x_validate, y_trte, y_validate = sklearn.model_selection.train_test_split(\n xtrhoval, ytrhoval, test_size=validation_set_size)\n x_train, x_out, y_train, y_out = sklearn.model_selection.train_test_split(\n x_trho, y_trte, test_size=holdout_set_size)\n return (x_train, y_train, x_unl, y_unl, x_out, y_out, x_validate, y_validate)", "def load_data(outputpath):\n ext = '.npy'\n x_train = np.load(os.path.join(outputpath, 'X_train' + ext))\n y_train_binary = np.load(os.path.join(outputpath, 'y_train' + ext))\n x_val = np.load(os.path.join(outputpath, 'X_val' + ext))\n y_val_binary = np.load(os.path.join(outputpath, 'y_val' + ext))\n x_test = np.load(os.path.join(outputpath, 'X_test' + ext))\n y_test_binary = np.load(os.path.join(outputpath, 'y_test' + ext))\n with open(os.path.join(outputpath, 'labels.json'), 'r') as fn:\n labels = json.load(fn)\n return x_train, y_train_binary, x_val, y_val_binary, \\\n x_test, y_test_binary, labels", "def load_data():\n data = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\", header=None)\n\n # utiliza somente as duas primeiras classes\n data = data[:100]\n # transforma as classes em 0 e 1\n data[4] = np.where(data.iloc[:, -1] == 'Iris-setosa', 0, 1)\n data = np.asmatrix(data, dtype='float64')\n return data", "def read_labels(labels_path):\n data = []\n with open(labels_path, 'r') as f:\n for line in f:\n line = line.split()\n sample = (line[0], int(line[1]))\n data.append(sample)\n \n dtype = [('video', '<U50'), ('label', int)]\n X = np.array(data, dtype=dtype)\n X = np.sort(X, order='video')\n return X", "def load_csv_data(filepath, textcol=\"text\"):\n df = pd.read_csv(filepath)\n samples = [ str(text) for text in df[textcol] ]\n labels = [ str(intent) for intent in df[\"label\"] ]\n\n return samples, labels", "def load_data(class_fnames):\n X = []\n y = []\n for label, fnames in enumerate(class_fnames):\n for fname in fnames:\n X.append(cv2.imread(fname))\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def __load_csv_into_mem(label, exp, obj, norms):\n filename = obj.get('file')\n # def csv_loader\n label_pos = obj.get('label', 'first')\n if label_pos == 'first':\n label_first = True\n else:\n label_first = False\n\n labels = []\n\n def get_element_from_csv():\n def callback(dim, lbl):\n CSVDataset.__load_csv_into_mem.dimension = dim - 
1\n for l in lbl:\n labels.append(l)\n\n with open(filename, 'r') as f:\n for i in CSVDataset.__get_element_from_file__(csv.reader(f), label_first, norms, callback):\n yield i\n\n input_data = np.fromiter(get_element_from_csv(), dtype=np.float32)\n dimension = CSVDataset.__load_csv_into_mem.dimension\n input_data = input_data.reshape((-1, dimension))\n # print input_data[0]\n labels = np.asarray(labels, 'int32')\n kwargs = {}\n if 'batches' not in kwargs:\n b = getattr(exp.args, '%s_batches' % label, None)\n kwargs['batches'] = b\n if 'size' not in kwargs:\n kwargs['size'] = exp.args.batch_size\n kwargs['label'] = label\n return SequenceDataset(*(input_data, labels), **kwargs)", "def load_labels(self, labels):\n self.labels = pd.DataFrame(labels, index=[\"label\"]).T", "def load_csv(filename):\r\n dataset = list()\r\n with open(filename, 'r') as file:\r\n csv_reader = reader(file, delimiter='\\t')\r\n for row in csv_reader:\r\n if not row:\r\n continue\r\n dataset.append([float(i) for i in row])\r\n return dataset", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)", "def load_training_data(fname):\n all_data = load_csv(fname, 'excel-tab')\n\n labels = [rec[2] == 'OFF' for rec in all_data]\n data = [convert_to_reals(clean_text(rec[1])) for rec in all_data]\n max_features = max([len(rec) for rec in data])\n\n # Pad the data\n for rec in data:\n rec.extend([0.0] * (max_features - len(rec)))\n\n return labels, data, max_features", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def readData(fname):\n pd = pandas.read_csv(fname)\n return [numpy.array(pd[colname]) for colname in pd.columns[1:]]", "def _get_data(self, path: str, label_column: str = None, header: bool = True):\n x, y = read_data(path, label_column, header)\n x = np.array(x)\n y = np.array(y)\n x = normalize(x)\n x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=self.train_size)\n return x_train, x_test, y_train, y_test", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy 
= np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', path))", "def load_data_set(filename):\n\n input_file = open(filename)\n\n num_features = len(input_file.readline().split('\\t')) - 1\n input_file.seek(0)\n data_mat = []\n label_mat = []\n\n for line in input_file.readlines():\n line_arr = []\n curr_line = line.strip().split('\\t')\n for i in range(num_features):\n line_arr.append(float(curr_line[i]))\n data_mat.append(line_arr)\n label_mat.append(float(curr_line[-1]))\n\n return data_mat, label_mat", "def load_gt(path):\n train_results = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n train_results.append(int(row[1]))\n return np.array(train_results)", "def parse_labels(file: str) -> ndarray:\n rows = []\n with open(file, 'r', encoding='utf-8') as f:\n for row in f:\n rows.append(row.strip())\n return array(rows)", "def labels_data(protein_data_path, columns):\n labels = pd.read_csv(protein_data_path, sep=\"\\t\").fillna(0)\n return labels[columns].astype(int).values", "def extract_data(filename):\n with open(filename, 'rb') as f:\n reader=f.readlines()\n train_data_label = [[int(x) for x in line.split() if x.isdigit()] for line in reader] \n # sorted by label\n train_data_label = sorted(train_data_label, key=lambda x: x[-1])\n train_data_label = np.array(train_data_label) \n return train_data_label", "def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples", "def load_data(filename):\n assert os.path.exists(filename)==True\n dat = scipy.io.loadmat(filename)\n inputs = dat['inputs']\n #print len(inputs)\n targets = dat['targets']\n #print len(targets)\n assert len(inputs)==len(targets)\n\n global alldata\n global indim \n global outdim\n\n indim = len(inputs[0])\n outdim = 1\n #print indim\n alldata = ClassificationDataSet(indim, outdim, nb_classes = 8)\n alldata.setField('input',inputs)\n alldata.setField('target',targets)\n\n assert len(alldata['input'])==len(alldata['target'])\n print type(alldata)", "def load_data_and_labels(data_source, remove_stopword=False, run_with_keras=False):\n # Read the CSV file and get its contents\n with open(data_source, 'r', encoding='utf-8', errors='ignore') as f:\n csv_reader = csv.reader(f)\n # get the header\n header = next(csv_reader)\n label_idx = header.index('label')\n content_idx = header.index('content')\n print(f'The label index is : {label_idx} and the content index is : {content_idx}')\n\n y_text = list()\n x_text = list()\n\n for line in csv_reader:\n # get the sentence 
from the line\n sentence = line[content_idx].strip()\n x_text.append(sentence)\n y_text.append(int(line[label_idx]))\n\n # preprocess input text\n if run_with_keras:\n x_text = [clean_str(sent, remove_stopword) for sent in x_text]\n else:\n x_text = [clean_str(sent, remove_stopword).split(' ') for sent in x_text]\n\n # get the lengths for every line\n lengths = np.array(list(map(len, [sent for sent in x_text])))\n\n return [x_text, y_text, lengths]", "def load_test_data():\n\n # Load X_test\n with open('X_test.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n feature_string_matrix = []\n for row in reader:\n feature_list = []\n for i in range(TEST_N):\n x_value = row['x' + str(i)]\n # Hit missing values\n if x_value == '':\n feature_list.append(np.nan)\n else:\n feature_list.append(float(row['x' + str(i)]))\n feature_string_matrix.append(feature_list)\n X_test = np.array(feature_string_matrix)\n return X_test", "def read_data(feature_file, label_file):", "def load_data(label_mode='fine'):\n if label_mode not in ['fine', 'coarse']:\n raise ValueError('`label_mode` must be one of `\"fine\"`, `\"coarse\"`.')\n\n dirname = 'cifar-100-python'\n origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n path = get_file(dirname, origin=origin, untar=True)\n\n fpath = os.path.join(path, 'train')\n x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')\n\n fpath = os.path.join(path, 'test')\n x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')\n\n y_train = np.reshape(y_train, (len(y_train), 1))\n y_test = np.reshape(y_test, (len(y_test), 1))\n\n # Rescale raw data\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n\n x_train /= 255.\n x_test /= 255.\n\n if K.image_data_format() == 'channels_last':\n x_train = x_train.transpose(0, 2, 3, 1)\n x_test = x_test.transpose(0, 2, 3, 1)\n\n return (x_train, y_train), (x_test, y_test)", "def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_expected(filename):\n\n all_labels = sorted([int(k) for k in open(filename).readline().split()[1:]])\n data = numpy.loadtxt(filename, dtype='float64', skiprows=1)\n return all_labels, data[:,0].astype('int64'), data[:,1:]", "def load_data(file_name):\n data = np.load(file_name, allow_pickle=True)\n\n X, y = [], []\n\n for mfccs, label in data:\n X.append(mfccs)\n y.append(label)\n\n X = np.array(X)\n y = np.array(y)\n\n X = X.reshape(*X.shape, 1)\n y = y.reshape(-1, 1)\n\n return X, y", "def load_data_and_labels():\n # Load data from files\n positive_examples = list(\n open(\"./data/rt-polarity.pos\", \"r\", encoding='latin-1').readlines())\n positive_examples = [s.strip() for s in positive_examples]\n negative_examples = list(\n open(\"./data/rt-polarity.neg\", \"r\", encoding='latin-1').readlines())\n negative_examples = [s.strip() for s in negative_examples]\n # Split by words\n x_text = positive_examples + negative_examples\n x_text = [clean_str(sent) for sent in x_text]\n x_text = [s.split(\" \") for s in x_text]\n # Generate labels\n positive_labels = [[0, 1] for _ in positive_examples]\n 
negative_labels = [[1, 0] for _ in negative_examples]\n y = np.concatenate([positive_labels, negative_labels], 0)\n return [x_text, y]", "def read_img_names_labels_csv(csv_path):\n df = pd.read_csv(csv_path)\n\n try:\n image_names_list = list(df['ImageName'])\n y = list(df['Label'])\n except KeyError:\n raise CsvColumnNameException(\" The column names of image-name_label csv must be 'ImageName' and 'Label' \")\n\n return image_names_list, y", "def csv_parser(lines): \n\n data_points = []\n for line in lines:\n items = line.strip().split(\",\")\n try: #will fail on header line in file\n data_points.append(map(float, items[1:])) #first item is the label\n except ValueError: #must be the header\n continue\n return data_points", "def load_training_gt(list_files):\n training_results = []\n for res_file in list_files:\n with open(os.path.join(\"data\", res_file)) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_results.append(int(row[1]))\n return np.array(training_results)", "def load_data(filename):\n with open(\"./shopping.csv\", \"r\") as f:\n reader = csv.reader(f)\n next(reader)\n evidence_raw = []\n labels_raw = []\n for row in reader:\n evidence_raw.append(row[:-1])\n labels_raw.append(row[-1])\n evidence = []\n labels = []\n for row1, row2 in zip(evidence_raw, labels_raw):\n evidence.append(oneHotEncode_Evi(row1))\n labels.append(oneHotEncode_labels(row2))\n return (evidence, labels)", "def _load_images_labels(self):\n path_dataset_file = self.path_model_id.joinpath(f'{self.set_name}_set.csv')\n \n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n rows = list(csv_reader)\n\n if self.shuffle:\n rng = default_rng(self.seed)\n rng.shuffle(rows)\n \n self.n_examples = len(rows)\n\n ds_files = tf.data.Dataset.from_tensor_slices(\n [path.join(str(self.path_data), f'label_{row[1]}', row[0])\n for row in rows])\n \n ds_images = ds_files.map(self._load_preprocess_image)\n\n class_labels_enc = self.class_le.fit_transform(\n [row[1] for row in rows])\n\n ds_labels = tf.data.Dataset.from_tensor_slices(\n class_labels_enc)\n\n return ds_images, ds_labels", "def load_labels(labels_dir, trial_name):\n labels_path = labels_dir + trial_name + \".txt\"\n raw_labels_data = np.genfromtxt(labels_path, dtype=np.int,\n converters=LABELS_CONVERTERS,\n usecols=LABELS_USECOLS)\n #print(\"rawlabelsdata: \", raw_labels_data)\n #print(get_first_frame(labels_path))\n frames = np.arange(get_first_frame(labels_path), get_last_frame(labels_path)+1, dtype=np.int)\n #print(\"frames: \", frames)\n #print(frames.shape)\n #labels = np.zeros(frames.shape, dtype=np.int)\n labels1 = []\n #print(labels)\n for start, end, label in raw_labels_data:\n #mask = (frames >= start) & (frames <= end)\n #print(start)\n #print(end)\n i = start\n while(i<end):\n if(i%6 == 0):\n labels1.append(label)\n i = i+1\n\n #labels[mask] = label\n #print(\"labels[mask]: \",labels[mask])\n labels1 = np.array(labels1)\n #print(labels1)\n labels_data = labels1.reshape(-1,1)\n #print(labels1.shape)\n #print(\"labels: \", labels_data)\n \n return labels_data", "def load_dataset_train():\n df_train = load_csv_file(\"31_train.csv\")\n df_train_target = load_csv_file(\"31_target_train.csv\")\n\n return df_train.values, df_train_target.values", "def load_data(path):\n train = pd.read_csv(os.path.join(path,'train.csv'))\n test = pd.read_csv(os.path.join(path,'test.csv'))\n \n return train, test", "def load_csv_dataset(path,\n label_col,\n 
data_dir=None,\n smi_col='smiles',\n explicit_H=False,\n use_chirality=False,\n use_molecular_attributes=False,\n all_pair_features=False,\n graph_distance=True):\n df = pd.read_csv(path)\n smiles = np.array(df[smi_col])\n labels = np.array(df[label_col])\n if data_dir is None:\n data_dir = tempfile.mkdtemp()\n dataset = MolDataset(root=data_dir)\n batch_graphs = []\n for i, (smi, l) in enumerate(zip(smiles, labels)):\n if i > 0 and i % 1000 == 0:\n print(\"Featurized %d molecules\" % i)\n dataset.add_graph_batch(batch_graphs)\n batch_graphs = []\n g = mol_to_graph(Chem.MolFromSmiles(smi),\n explicit_H=explicit_H,\n use_chirality=use_chirality,\n use_molecular_attributes=use_molecular_attributes,\n all_pair_features=all_pair_features,\n graph_distance=graph_distance)\n g.smi = smi\n w = (l==l) * 1\n y = copy.deepcopy(l)\n y[np.where(y != y)] = 0.\n g.y = t.from_numpy(y).long()\n g.w = t.from_numpy(w).float()\n batch_graphs.append(g)\n dataset.add_graph_batch(batch_graphs)\n return dataset", "def load_data(filename):\n data = []\n with open('data/' + filename) as raw_data:\n for line in raw_data.readlines():\n data.append(float(line.strip('\\n')))\n return data\n # data = np.mat(np.genfromtxt('data/' + filename)).T\n # return data", "def load_data(data_path):\n df = pd.read_csv(data_path, names=['feature1', 'feature2',\n 'label'], header=None)\n\n # Get x1 and x2\n feature_1 = df['feature1'].to_list()\n feature_2 = df['feature2'].to_list()\n\n # Normalize features using function below\n norm_feature_1 = normalize_feature(feature_1)\n norm_feature_2 = normalize_feature(feature_2)\n\n # Create concatenated features and get labels\n features = np.array([norm_feature_1, norm_feature_2])\n labels = df['label'].to_list()\n\n # Add features to data frame\n df['norm_x1'] = norm_feature_1\n df['norm_x2'] = norm_feature_2\n\n return features, labels, df", "def load_data(train_file, test_file):\n\n data = np.asarray(pd.read_csv(train_file, header=0))\n data_ts = np.asarray(pd.read_csv(test_file, header=0))\n\n x_tra = data[:, :-1]\n y_tra = data[:, -1]\n\n return x_tra, y_tra, data_ts", "def get_training_data():\n features = []\n labels = []\n\n with open('data.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n\n rows = [line for line in csv_reader]\n random.shuffle(rows)\n\n for vector in rows:\n feature_vector = [float(vector[i]) for i in range(4)]\n features.append(feature_vector)\n labels.append(encode_label(vector[4]))\n\n normalise_features(features)\n\n return features, labels", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def from_label_file(cls, label_file_path, out_path=FEATURES_DATA_PATH, source_path=RAW_DATA_PATH):\n df = pd.read_csv(label_file_path)\n filenames = df['filename']\n labels = df['label']\n return cls(filenames, labels, out_path=out_path, source_path=source_path)", "def load_dataset(path):\n training_data = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n 
next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def load_label(path_file):\n if '.csv' not in path_file:\n raise FileNotFoundError('Only CSV format is supported currently')\n\n t0 = time()\n df = pd.DataFrame()\n\n with open(path_file, 'r') as f:\n # TODO: Implement the logic once the format is finalised\n pass\n\n logging.info('Loading label data with {} rows from {} takes {} secs'.format(df.shape[0],\n path_file, time() - t0))\n return df", "def _read_data_file(self, path_model_id):\n\n path_dataset_file = path_model_id.joinpath('training_set.csv')\n\n with path_dataset_file.open(mode='r', newline='') as f:\n csv_reader = reader(f, delimiter=',')\n rows = list(csv_reader)\n\n self.example_count = len(rows)\n\n img_files = [path.join(f'label_{row[1]}', row[0]) for row in rows]\n enc_labels = self.class_le.fit_transform([row[1] for row in rows])\n \n self.files_labels = [[img_files[i], enc_labels[i]]\n for i in range(self.example_count)]", "def read_csv_file(csv_fname, ignore_first_row = True):\n \n X, y = [], []\n with open(csv_fname, 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n if ignore_first_row:\n next(csv_reader)\n for row in csv_reader:\n X.append(row[:-1])\n y.append(row[-1])\n return np.array(X), np.array(y)", "def load_predictions(filepath):\n\twith open(filepath, 'r') as fh:\n\t\tcells = [line.split(',') for line in fh.read().splitlines()]\n\tdata = empty((TESTSIZE, NCLASSES), dtype = float64)\n\tfor k, row in enumerate(cells[1:]):\n\t\tdata[k, :] = row[1:]\n\treturn data", "def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data" ]
[ "0.706477", "0.68677425", "0.6859685", "0.6782772", "0.67529887", "0.6746655", "0.67292655", "0.66810787", "0.66234106", "0.65906125", "0.6552061", "0.65014195", "0.64909846", "0.646308", "0.6431648", "0.6414293", "0.6412473", "0.6407337", "0.63972247", "0.6389596", "0.63569885", "0.6344617", "0.634075", "0.63401765", "0.6335867", "0.6291479", "0.6288128", "0.62760293", "0.62760293", "0.6273798", "0.6265046", "0.6248506", "0.62457365", "0.6222311", "0.622082", "0.6216309", "0.61955035", "0.61708564", "0.6152052", "0.6149234", "0.61439884", "0.61419255", "0.6134468", "0.61071473", "0.6101974", "0.6094547", "0.60942346", "0.6094223", "0.6090441", "0.60881793", "0.60874313", "0.60864663", "0.60731447", "0.6060774", "0.60590875", "0.60518074", "0.60496044", "0.6042525", "0.60357213", "0.60310066", "0.60189897", "0.60189724", "0.6018446", "0.60129786", "0.6009791", "0.6006921", "0.60044384", "0.6001786", "0.59874046", "0.59808004", "0.597737", "0.5973995", "0.59676874", "0.59627616", "0.5951678", "0.5951081", "0.5949759", "0.59490055", "0.5946359", "0.59403914", "0.5934303", "0.59169835", "0.5916611", "0.5911365", "0.5907404", "0.59044945", "0.5898257", "0.5895065", "0.589372", "0.58917195", "0.5890448", "0.5889512", "0.5882536", "0.588173", "0.58713686", "0.5869744", "0.5867229", "0.5865214", "0.5864695", "0.58608305" ]
0.7765821
0
Given a vector of predictions, save results in CSV format.
def save_results(predictions, filename):
    with open(filename, 'w') as f:
        f.write("id,ACTION\n")
        for i, pred in enumerate(predictions):
            f.write("%d,%f\n" % (i + 1, pred))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_results(file_path, predictions):\n with open(file_path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\",\")\n writer.writerow([\"Id\", \"Bound\"])\n for id, bound in enumerate(predictions):\n writer.writerow([id, bound])", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(zip(y_pred))\n out.close()", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()", "def write_predictions(pred, filename=\"pred.csv\"):\n output_file = open(filename, \"wb\")\n writer = csv.writer(output_file)\n datetimes = get_datetimes(\"test.csv\")\n\n writer.writerow([\"datetime\", \"count\"])\n\n for index, count in enumerate(pred):\n writer.writerow([datetimes[index], int(count)])\n\n output_file.close()", "def write_results_to_csv(ids,\n sentiments_actuals,\n sentiments_predictions,\n filename):\n output = pd.DataFrame(data={\n \"id\": ids,\n \"sentiment_actual\": sentiments_actuals,\n \"sentiment_predicted\": sentiments_predictions})\n output.to_csv(filename, index=False, quoting=3)", "def write_predictions_to_file(predictor, testDataFname, enc, outputFname, features=None):\n\n testData, _, testDataIds, _ = make_data(testDataFname, features=features, enc=enc)\n\n dt = datetime.now()\n predictions = predictor.predict(testData)\n print 'predicting took', datetime.now() - dt\n\n featureSelectionOutput = np.transpose(np.vstack((testDataIds, predictions.round().astype(int))))\n\n with open(outputFname, 'wb') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow(['id', 'loss'])\n writer.writerows(featureSelectionOutput)", "def store_classes_and_predictions(output_file_path, classes, predictions):\n with open(output_file_path, mode='a', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['true', 'predicted'])\n for i in range(len(classes)):\n csvwriter.writerow([classes.iloc[i], predictions.iloc[i]])", "def write_results(self, results):\n predictions = open('hmm_results.csv', 'w')\n predictions.write(\"Type,Prediction\")\n for type in results:\n if type == 'O':\n continue\n predictions.write(\"\\n\" + str(type) + \",\")\n for interval in results[type]:\n predictions.write(str(interval) + \" \")\n predictions.close()", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':r1,'Prediction':round(r2)})", "def write_predictions(prediction_dic, result_path):\n with open(result_path, 'wb') as outfile:\n outfile.write(bytes('Patient_ID,HPV/p16_status\\n', 'UTF-8'))\n for patient_id, pred in prediction_dic.items():\n outfile.write(bytes(str(patient_id) + ',' + str(pred) + '\\n', 'UTF-8'))", "def predictions_to_csv(outstream, decomposition: FreeWilsonDecomposition, predictions):\n writer = None\n for pred in predictions:\n if not writer:\n rgroups = set()\n for rgroup in decomposition.rgroups:\n rgroups.add(rgroup)\n rgroups = sorted(rgroups, key=_rgroup_sort)\n\n lookup = {}\n for i, rg in 
enumerate(rgroups):\n lookup[rg] = i\n writer = csv.writer(outstream)\n header = ['smiles', 'prediction'] + [f\"{rg}_smiles\" for rg in list(rgroups)]\n writer.writerow(header)\n rg = [\"\"] * len(lookup)\n for s in pred.rgroups:\n rg[lookup[s.rgroup]] = s.smiles\n\n row = [pred.smiles, repr(pred.prediction)] + rg\n writer.writerow(row)\n return header", "def log_inference(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8', \r\n\t\t\t\tfloat_format='%.3f', index=False)\r\n\r\n\t\twith open(path + \"-predictions.csv\", \"w\") as f:\r\n\t\t\tresults[[\"tag\", \"y_hat\"]].to_csv(\r\n\t\t\t\tf, index=False, float_format='%.3f', header=False)", "def save_predictions(gtfilename, loss_type, probs, preds, outfile):\n\n # 1. get file ids\n liste_fileids = []\n targets = []\n passFirstLine=True\n with open(gtfilename, 'r') as fh:\n for line in fh:\n if passFirstLine:\n passFirstLine = False\n continue\n tmp = line.rstrip().split(',')\n liste_fileids.append(tmp[0])\n targets.append(tmp[1])\n\n print 'liste_fileids', len(liste_fileids)\n # 2. save preds\n import csv\n with open(outfile, 'w') as csvfile:\n # fieldnames = ['itemid', 'hasbird', 'pred', 'gt']\n fieldnames = ['itemid', 'hasbird']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n if loss_type == 'categorical_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i, 1], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i, 1]})\n elif loss_type == 'binary_hinge' or loss_type == 'binary_crossentropy' or loss_type == 'weighted_binary_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i][0], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i][0]})\n\n print \"INFO: predictions (positive class probas) saved to file:\", outfile", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id': int(r1), 'Prediction': int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer 
= csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def save_results(self):\n results = pd.concat([\n pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)", "def exportEvaluation(self,results,url):\n profbox()\n if not os.path.exists(url):\n open(url, 'w').close()\n myfile = open(url, 'a')\n\n wr = csv.writer(myfile)\n r = numpy.array(results)\n if len(r.shape) == 1:\n wr.writerow(results)\n else:\n wr.writerows(results)", "def save_prediction(self, meta, y_pred, y, filename):\n df = pd.DataFrame(meta)\n df['y_pred'] = y_pred\n df['y'] = y\n print(df)\n df.loc[:, 'id'] = df.index\n self.df_to_csv(df, filename, store_header=False)", "def create_csv_submission(ids, y_pred, name):\n # negative class has to be labelled -1 on AIcrowd\n y_pred[y_pred == 0] = -1\n\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':int(r1),'Prediction':int(r2)})", "def __write_csv(self, prediction_probs, n, filename):\n d = {'Id': pd.Series([i for i in xrange(1, n + 1)]),\n 'Action': pd.Series(prediction_probs)}\n df = pd.DataFrame(d)\n df = df[['Id', 'Action']]\n df.to_csv(filename, sep=',', encoding='utf-8',\n index=False)", "def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)", "def writeResultsToDisk(header, results, data, filename):\n for i, datum in enumerate(data):\n np.append(data, results[i])\n header.append(\"diagnosis\")\n np.insert(data, 0, header)\n with open(filename, 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for row in data:\n writer.writerow(row)\n return header, np.array(data)", "def write_results(results):\n with RESULTS_PATH.open(\"w\") as writer:\n csvwriter = csv.writer(writer)\n 
csvwriter.writerows(results)", "def save_predictions(battle_name: str, data: str, predictions: List):\n path = './data_reader/data/predictions/' + data + '.' + battle_name\n with open(path, 'w') as outfile:\n for prediction in predictions:\n outfile.write(str(prediction) + '\\n')", "def submit_predictions(\n sub_name: str, predictions: jnp.ndarray, id_col: jnp.array\n):\n with open(os.path.join(\"data\", sub_name), \"w\") as sub_file:\n sub_file.write(\"Id,SalePrice\\n\")\n for (example_id, pred) in zip(id_col, jnp.squeeze(predictions)):\n sub_file.write(f\"{example_id},{pred}\\n\")", "def log_results(self, path):\n pd.DataFrame(self.results).to_csv(path)", "def save_performances(self):\r\n nb_datasets = len(self.results)\r\n resu = [[] for k in range(nb_datasets)]\r\n\r\n # fetch results\r\n for k in range(nb_datasets):\r\n best = np.argmax(self.results[k]['mean_test_score'])\r\n resu[k].append(('score', self.results[k]['mean_test_score'][best]))\r\n resu[k] = resu[k] + list(self.results[k]['params'][best].items())\r\n\r\n # write results in csv\r\n for k, resu in enumerate(resu):\r\n with open('results/final_results_{}.csv'.format(k), 'a') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(resu)", "def write_csv(estimates: ListOfDicts, output_csv: str) -> None:\n with open(output_csv, \"w\") as f:\n writer = csv.DictWriter(f, fieldnames=estimates[0].keys())\n writer.writeheader()\n for row in estimates:\n writer.writerow(row)\n logging.info(f\"Wrote estimates as {output_csv}\")", "def save_output(pris):\n pris.to_csv('reactors_pris_2016.csv',\n index=False,\n sep=',',\n )", "def __create_output_csv(self, df, score_list, elapsed_list):\n df['Similar']=score_list\n df['Elapsed']=elapsed_list\n df.to_csv('Output.csv',index=False)\n return df", "def save_prediction(predictions, image_file, path):\n\t\n\tsave_file = convert_file_extension_to_txt(image_file)\n\t\n\twith open(os.path.join(path, save_file), 'w') as f:\n\t\tfor prediction in predictions:\n\t\t\tf.write(str(prediction) + \"\\n\")", "def to_csv(self, out_folder):\n import pandas as pd\n\n df = pd.DataFrame(zip(self.results['cids'],\n self.results['differences'],\n self.results['experimental_values']),\n columns=['cids', 'differences',\n 'experimental_values'])\n df.to_csv(out_folder, index=False)", "def writePredictions(outfile, pred, proba, y, data, evalmode=False):\n if evalmode:\n header = ['chr', 'start', 'end', 'prediction', 'true label']\n for i in range(np.shape(proba)[1]):\n header.append(\"probability:\"+str(i))\n pd.DataFrame(np.concatenate((data.values[:,0:3],np.transpose(pred[np.newaxis]).astype(int),np.transpose(y[np.newaxis]), proba), axis=1)[:,:]).to_csv(outfile, sep=\"\\t\", index=None, header=header)\n else:\n header = ['chr', 'start', 'end', 'prediction']\n for i in range(np.shape(proba)[1]):\n header.append(\"probability:\"+str(i))\n pd.DataFrame(np.concatenate((data.values[:,0:3],np.transpose(pred[np.newaxis]).astype(int), proba), axis=1)[:,:]).to_csv(outfile, sep=\"\\t\", index=None, header=header)", "def createFileCSV(table, path=\"./prediction\"):\t\n\tif len(table) < 1:\n\t\traise NameError('Empty Table!')\n\telse:\n\t\tfile = open(path + '.csv', 'w+')\n\n\t\tfile.write(table[0].toStringHeaders() + \"\\n\")\n\n\t\tfor row in table:\n\t\t\tfile.write(row.toStringCSV() + '\\n')\n\t\tfile.close()", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 
'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def export_to_ranking_csv(request, token, project):\n from appraise.local_settings import EXPORT_TOKEN\n if not token == EXPORT_TOKEN:\n return HttpResponseForbidden()\n \n annotation_project = get_object_or_404(Project, name=project)\n \n queryset = RankingResult.objects.filter(item__hit__completed=True)\n\n results = [u'srclang,trglang,srcIndex,doucmentId,segmentId,judgeId,' \\\n 'system1Number,system1Id,system2Number,system2Id,system3Number,' \\\n 'system3Id,system4Number,system4Id,system5Number,system5Id,' \\\n 'system1rank,system2rank,system3rank,system4rank,system5rank']\n \n for result in queryset:\n if isinstance(result, RankingResult):\n if result.item.hit.project_set.filter(id=annotation_project.id):\n # Current implementation of export_to_pairwise_csv() is weird.\n # By contrast, export_to_csv() generates the right thing...\n current_csv = result.export_to_csv()\n if current_csv is None:\n continue\n results.append(current_csv)\n \n export_csv = u\"\\n\".join(results)\n export_csv = export_csv + u\"\\n\"\n return HttpResponse(export_csv, mimetype='text/plain')", "def create_csv_submission_prob(ids, y_pred, y_prob, name):\n # negative class has to be labelled -1 on AIcrowd\n y_pred[y_pred == 0] = -1\n\n df = pd.DataFrame({'id': ids, 'label': y_pred, 'prob': y_prob})\n df.to_csv(name, sep=\",\", index=False)", "def log_evaluation(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8',\r\n\t\t\t\tfloat_format='%.3f', index=False)", "def save_reviews_to_csv(language, review_list, dataset):\n with open('reviews_'+dataset+'_'+language+'.csv', 'w') as csvfile:\n fieldnames = review_list[0].__dict__.keys()\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for review in review_list:\n writer.writerow(review.__dict__)", "def save_learning_data(path, num_episodes, avg_rewards, std_rewards, avg_losses, std_losses):\n rows = zip(num_episodes, avg_rewards, std_rewards, avg_losses, std_losses)\n with open(path + '/learning_data.csv', 'w') as f:\n w = csv.writer(f)\n w.writerows(rows)", "def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))", "def write_relevance_tocsv(relevance, corpus):\n csv_filename = config.CORPUS[corpus]['relevance_file']\n print('writing relevance')\n print(relevance)\n with open(csv_filename, 'w') as file:\n csv.writer(file).writerows((k,) + v for k, v in relevance.items())", "def save_results(self, path):\n create_folder(path)\n self.get_scores().to_csv(path + r'/scores.csv', index=False)\n self.get_results().to_csv(path + r'/results.csv', index=False)\n self.get_pivot_last_epoch().to_csv(path + r'/pivot_last_epoch.csv', index=True)", "def save_csv(outfile, movies):\n writer = csv.writer(outfile)\n writer.writerow(['Title', 'Rating', 'Year', 'Actors', 'Runtime'])\n for movie in movies:\n 
writer.writerow(movie)\n\n # ADD SOME CODE OF YOURSELF HERE TO WRITE THE MOVIES TO DISK", "def output_predictions(predictions_file, relations, predictions, test_set_keys, test_labels):\n with codecs.open(predictions_file, 'w', 'utf-8') as f_out:\n for i, (w1, w2) in enumerate(test_set_keys):\n f_out.write('\\t'.join([w1, w2, relations[test_labels[i]], relations[predictions[i]]]) + '\\n')", "def write_forecasts(forecast_dict, output):\n\n for objective_id, forecast_value in list(forecast_dict.items()):\n headers = [f[\"model\"] for f in forecast_value]\n points = []\n if not forecast_value:\n sys.exit(\"No forecasts available\")\n for index in range(len(forecast_value[0][\"point_forecast\"])):\n points.append([f[\"point_forecast\"][index] for f in forecast_value])\n output_file = \"%s_%s.csv\" % (output, objective_id)\n with UnicodeWriter(output_file, lineterminator=\"\\n\") as out_handler:\n out_handler.writerow(headers)\n for row in points:\n out_handler.writerow(row)", "def write_model_results(model, input_file, repr, tags, outpath):\n input, input_data = read_input(input_file)\n\n if repr == \"c\":\n x = utils.get_features(input, ixs=3)\n else:\n x = utils.get_features(input, chars=True)\n\n w_batcher = utils.AutoBatcher(x, x, batch_size=1, shuffle=False)\n labels = []\n for inputs, _ in w_batcher.get_batches():\n output = torch.max(model(inputs), 1)[1]\n labels += output.cpu().data.numpy().tolist()\n\n predictions = utils.NEWLINE.join([\"{} {}\".format(input_data[i], tags[labels[i]])\\\n for i in range(len(input_data))])\n with open(outpath, \"w\") as outfile:\n outfile.write(predictions)", "def save_results_csv(fname, results, header=0):\n\n new_rows = []\n if not os.path.isfile(fname):\n args = fname.split('/')[:-1]\n directory = os.path.join(*args)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(fname, 'wt') as f:\n writer = csv.writer(f)\n if header == 0:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed', 'Date']])\n if header == 1:\n writer.writerows(\n [['Precision', 'Recall', 'F1 score', 'Random Seed']])\n elif header ==2:\n writer.writerows(\n [['Step', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed']])\n\n elif header == 5:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'Scores']])\n\n with open(fname, 'at') as f:\n # Overwrite the old file with the modified rows\n writer = csv.writer(f)\n new_rows.append(results) # add the modified rows\n writer.writerows(new_rows)", "def output_predictions(predictions, df_symbol_date, file_name):\n\n # Retrieve baaged prediction\n pred_df = predictions['deep_bagged_predictions']\n\n # Create dataframe by resetting the index to allow columns to be concatenated\n output_df = pd.concat([df_symbol_date.reset_index(\n drop=True), pred_df.reset_index(drop=True)], axis=1)\n\n # Save output to file\n pred_file_location = './predictions/' + file_name + '.csv'\n print('Writing predictions to', pred_file_location)\n output_df.to_csv(pred_file_location)", "def save_results(output_dir,\n check_file,\n results,\n exp_string,\n identifier,\n shuffle_labels,\n model_options,\n predictor='classify',\n fold_no=None,\n titration_ratio=None):\n\n signal = 'shuffled' if shuffle_labels else 'signal'\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n if 
isinstance(model_options.n_dim, list):\n n_dim = '.'.join(map(str, model_options.n_dim))\n else:\n n_dim = model_options.n_dim\n\n if predictor == 'classify':\n auc_df = pd.concat(results[\n '{}_auc'.format(exp_string)\n ])\n output_file = construct_filename(output_dir,\n 'auc_threshold_metrics',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n auc_df.to_csv(\n output_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n aupr_df = pd.concat(results[\n '{}_aupr'.format(exp_string)\n ])\n output_file = construct_filename(output_dir,\n 'aupr_threshold_metrics',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n aupr_df.to_csv(\n output_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n if '{}_coef'.format(exp_string) in results:\n coef_df = pd.concat(results[\n '{}_coef'.format(exp_string)\n ])\n coef_df.to_csv(\n check_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n metrics_df = pd.concat(results[\n '{}_metrics'.format(exp_string)\n ])\n\n if '{}_preds'.format(exp_string) in results:\n preds_df = pd.concat(results[\n '{}_preds'.format(exp_string)\n ])\n else:\n preds_df = None\n\n if '{}_param_grid'.format(exp_string) in results:\n params_df = pd.concat(results[\n '{}_param_grid'.format(exp_string)\n ])\n else:\n params_df = None\n\n output_file = construct_filename(output_dir,\n 'metrics',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n metrics_df.to_csv(\n output_file, sep=\"\\t\", index=False, float_format=\"%.5g\"\n )\n\n if preds_df is not None:\n output_file = construct_filename(output_dir,\n 'preds',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no,\n t=titration_ratio)\n preds_df.to_csv(\n output_file, sep=\"\\t\", float_format=\"%.5g\"\n )\n\n if params_df is not None:\n output_file = construct_filename(output_dir,\n 'param_grid',\n '.tsv.gz',\n identifier,\n training_data,\n model_options.model,\n signal,\n predictor,\n s=model_options.seed,\n n=n_dim,\n f=fold_no)\n\n params_df.to_csv(output_file, sep=\"\\t\")", "def save_results_csv(fname, results, header=0):\n\n new_rows = []\n if not os.path.isfile(fname):\n args = fname.split('/')[:-1]\n directory = os.path.join(*args)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n with open(fname, 'wt') as f:\n writer = csv.writer(f)\n if header == 0:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'AUPRC', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed', 'Date']])\n if header == 1:\n writer.writerows(\n [['Precision', 'Recall', 'F1 score', 'Random Seed']])\n elif header ==2:\n writer.writerows(\n [['Step', 'AUPRC', 'AUROC', 'Precision', 'Recall',\n 'F1 score', 'Random Seed']])\n\n elif header == 5:\n writer.writerows(\n [['Model', 'Dataset', 'Method', 'Weight', 'Label', \n 'Step', 'Scores']])\n\n with open(fname, 'at') as f:\n # Overwrite the old file with the modified rows\n writer = csv.writer(f)\n new_rows.append(results) # add the modified rows\n writer.writerows(new_rows)", "def save_predicted_results(predicted_results):\n # Save the model\n with open(\"predicted_results\", \"wb\") as predicted_results_file:\n pickle.dump(predicted_results, 
predicted_results_file)", "def write_to_file(data, file_to_output):\n # with open('X_train.csv','a') as f_handle:\n # np.savetxt(f_handle, X_train, fmt='%s', delimiter=\",\")\n\n with open(file_to_output, 'w') as f:\n for item in data.tolist():\n f.write(item + '\\n')", "def save_csv(vals: Vals):\n logging.info('Writing data to csv file')\n with open(PureWindowsPath(os.path.realpath(__file__)).parent / 'results.csv', 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow(('X', 'Y'))\n\n for x, y in dict(zip(vals.x, vals.y)).items():\n csvwriter.writerow((x, y))\n\n logging.info('Finished writing')\n messagebox.showinfo('Save to CSV', 'Successfully saved!')", "def write_to_csv(results, filename):\r\n fieldnames = ('datetime_utc', 'distance_au', 'velocity_km_s',\r\n 'designation', 'name', 'diameter_km',\r\n 'potentially_hazardous')\r\n\r\n with open(filename, 'w') as outfile:\r\n writer = csv.writer(outfile)\r\n writer.writerow(fieldnames)\r\n for row in results:\r\n r = [row.time, row.distance, row.velocity, row.neo.designation,\r\n row.neo.name, row.neo.diameter, row.neo.hazardous]\r\n writer.writerow(r)", "def save_predictions(self,file_path):\n # compute average of predictions\n num_examples = len(self.labels)\n\n if num_examples == 0:\n raise Exception (\"nothing to save\")\n\n def string_to_average(string):\n return np.average(np.array(string.split(\",\"),dtype=float))\n prediction_averages = np.around(map(string_to_average,self.predictions),decimals=3)\n\n # sort by prediction averages\n order = np.flipud(prediction_averages.argsort())\n prediction_averages = prediction_averages[order]\n self.pl_pairs = self.pl_pairs[order]\n self.predictions = self.predictions[order]\n self.labels = self.labels[order]\n # write all of the predictions to the file\n f = open(file_path + \"_predictions.txt\", 'w')\n\n for i in range(num_examples):\n f.write((str(prediction_averages[i]) + \" \"*10)[:10]\n + (str(self.labels[i]) + \" \"*50)[:10]\n + str(self.pl_pairs[i] + \" \"*50)[:50]\n + str(self.predictions[i] + \" \"*50)[:50]\n + \"\\n\")\n\n f.close()\n # write and save some metadata\n\n f = open(file_path + \"_scores.txt\", 'w')\n f.write(\"top 100 score: \")\n f.write(str(self.top_100_score(self.predictions,self.labels)))\n f.write(\"\\nAUC: \")\n f.write(str(self.auc(prediction_averages,self.labels)))\n f.write(\"\\nconfusion matrix: \")\n f.write(str(self.confusion_matrix(prediction_averages,self.labels)))\n f.close()\n\n # write a file in Kaggle MAP{K} submision format\n # the form is:\n # Protein1, Ligand3 Ligand4 Ligand2\n # Protein2, Ligand5 Ligand9 Ligand7\n\n raw_database_array = np.genfromtxt(FLAGS.test_set_file_path, delimiter=',', dtype=str)\n receptor_set = raw_database_array[:,2]\n receptor_set = list(set(map(lambda x:x.split('.')[0].split('/')[-1],receptor_set)))\n submission = {}\n for i in range(num_examples):\n # get the name of the ligand and protein\n ligand,receptor = self.pl_pairs[i].split(',')\n ligand = ligand.split('/')[-1].split('.')[0]\n receptor = receptor.split('/')[-1].split('.')[0]\n # add all protein-ligand pairs to submission\n if not receptor in submission.keys():\n submission[receptor] = {}\n submission[receptor]['ligands'] = [ligand]\n submission[receptor]['score'] = [prediction_averages[i]]\n else:\n submission[receptor]['ligands'].append(ligand)\n submission[receptor]['score'].append(prediction_averages[i])\n \n # write and save submisison to file\n # if failed to predict any liagnd for a receptor\n # use placeholder 'L' as predict 
result\n # e.g. P1234,L\n with open(file_path+'_submission.csv','w') as f:\n f.write('Id,Expected\\n')\n for key in receptor_set:\n if key in submission.keys():\n ligands = np.array(submission[key]['ligands'])\n scores = np.array(submission[key]['score'])\n ligands = ligands[np.flipud(scores.argsort())]\n f.write(key+','+' '.join(ligands)+'\\n')\n else:\n f.write(key+','+'L'+'\\n')", "def save_predictions(prediction_maps, output_file, dataset_names):\n assert len(prediction_maps) == len(dataset_names), 'Each prediction map has to have a corresponding dataset name'\n logger.info(f'Saving predictions to: {output_file}...')\n\n with h5py.File(output_file, \"w\") as output_h5:\n for prediction_map, dataset_name in zip(prediction_maps, dataset_names):\n logger.info(f\"Creating dataset '{dataset_name}'...\")\n output_h5.create_dataset(dataset_name, data=prediction_map, compression=\"gzip\")", "def log_history_to_csv_file(metaparams, history, csv_file):\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for i in range(len(history.history['loss'])):\n training_perfs = [i+1, history.history['loss'][i], history.history['acc'][i], history.history['val_loss'][i], history.history['val_acc'][i]]\n csv_writer.writerow(metaparams + training_perfs)\n return;", "def write_predictions_to_s3(self, fold_predictions: pd.DataFrame, output_path: str):\n \n # prepare dataframe\n prediction_columns = fold_predictions.columns[['prediction_' == x[:11] for x in fold_predictions.columns]].tolist()\n fold_predictions = fold_predictions[fold_predictions.train_or_test == 'test'] # only save test set\n fold_predictions = fold_predictions[['sf_account_id'] + prediction_columns] # only save salesforce ID and prediction columns\n fold_predictions.columns = ['sf_account_id'] + [x[11:] for x in prediction_columns] # remove predicted_ from column names\n \n # write to S3\n\n now_timestamp = str(pd.Timestamp.now()).split(\".\")[0]\n output_object = f'{output_path}propensity_{now_timestamp}.csv'\n csv_string = fold_predictions.to_csv(index=False)\n\n if 's3' in output_path:\n fs = s3fs.S3FileSystem()\n with fs.open(output_object, 'wb') as f:\n f.write(csv_string.encode())\n else:\n with open(output_object, 'wb') as f:\n f.write(csv_string.encode())\n\n return output_object", "def write_csv(output_filename, filenames, truncate=None):\n num_tokens = defaultdict(list)\n with open(output_filename, 'w') as fout:\n fout.write('label,sentence\\n')\n for filename in tqdm(filenames):\n sentiment = int('pos' in filename)\n # sentiment = int(os.path.basename(filename)[:-4].split(\"_\")[1])\n\n with open(filename) as f:\n doc = f.read()\n # Remove HTML\n soup = BeautifulSoup(doc)\n doc = soup.get_text()\n\n if truncate:\n doc = \" \".join(doc.split()[:truncate])\n\n num_tokens[sentiment].append(len(doc.split()))\n fout.write(\"{},{}\\n\".format(sentiment, doc))\n\n for k, v in num_tokens.items():\n print(\"Sentiment {}: Count: {:<10,} Tokens Mean: {:<10,.2f} Min: {:<5} Max: {}\".format(\n k, len(v), np.mean(v), np.min(v), np.max(v)))", "def save_ndarray_to_csv(docs_array, labels_array, csv_file):\n processed_array = np.vstack([labels_array, docs_array]).T\n df = pd.DataFrame(data=processed_array)\n df.to_csv(csv_file, index=False, header=['label', 'text'])", "def save_predictions(path: str, wrapper, results: Dict):\n predictions_with_idx = []\n\n if wrapper.task_helper and wrapper.task_helper.output:\n predictions_with_idx = wrapper.task_helper.output\n else:\n inv_label_map = {idx: label for 
label,\n idx in wrapper.label_map.items()}\n for idx, prediction_idx in zip(results['indices'], results['predictions']):\n prediction = inv_label_map[prediction_idx]\n idx = idx.tolist() if isinstance(idx, np.ndarray) else int(idx)\n predictions_with_idx.append({'idx': idx, 'label': prediction})\n\n with open(path, 'w', encoding='utf8') as fh:\n for line in predictions_with_idx:\n fh.write(json.dumps(line) + '\\n')", "def save_prediction(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n if Trainer.y_pred is None:\n messagebox.showerror(\"Information\", \"Preciction has not been made, please train a new model and predict or \"\n \"load a model and predict.\")\n return\n\n path = filedialog.asksaveasfile(mode='w', defaultextension=\".csv\", filetypes=[(\"csv files\", '*.csv'),\n (\"xlsx files\", '*.xlsx'),\n (\"dat files\", '*.dat')])\n\n copy_data = DataLoader.data.copy()\n copy_data['prediction'] = Trainer.y_pred\n copy_data.to_csv(path, index=False)\n\n # Clears memory\n copy_data.drop(copy_data.index, inplace=True)\n del copy_data", "def write_submission(ratings, file_name):\n # Build output string to write into the file\n output = \"Id,Prediction\\n\"\n for (row, col, rat) in ratings:\n # every line is of the format 'rX_cY,R' where X and Y correspond to row(user) and column(movie) indices and R is the rating\n # we have do increase row and col by one because numpy arrays use 0-base indexing while movie/user indices start at 1\n output += \"r%d_c%d,%f\\n\" % (row + 1, col + 1, rat)\n \n # Write file \n with open(os.path.join('../predictions_csv', file_name), 'w') as output_file:\n output_file.write(output)\n \n return output", "def write_out_prediction(predictions_file, src_seqs,\n trg_seqs, pred_string, src_feat_bundles,\n trg_feat_bundles, val_id):\n\n output_lines = []\n if trg_seqs[val_id] != pred_string:\n output_lines.append('*ERROR*')\n output_lines.append('SRC: {}'.format(src_seqs[val_id]))\n if src_feat_bundles[val_id]:\n output_lines.append('SFT: {}'.format(src_feat_bundles[val_id]))\n if trg_feat_bundles[val_id]:\n output_lines.append('TFT: {}'.format(trg_feat_bundles[val_id]))\n output_lines.append('TRG: {}'.format(trg_seqs[val_id]))\n output_lines.append('PRD: {}\\n'.format(pred_string))\n predictions_file.write('{}\\n'.format('\\n'.join(output_lines)))", "def predict(self):\n train_vec, test_vec = self.get_tfidf_vectors()\n clf = self.get_classifier()\n\n print '-'*40\n print 'Making predictions ...'\n clf.fit(train_vec, self.train_ans)\n clf_predictions = clf.predict_proba(test_vec)\n\n print 'Storing predictions in', self.pred_file\n pred_out = [\"Id,predictions\"]\n num_pred = range(30)\n for fid, pred in zip(self.test_index, clf_predictions):\n top_rec = sorted(num_pred, key=lambda k: pred[k], reverse=True)[:3]\n pred_out.append(\"%s,%s\" % (fid, ' '.join( [clf.classes_[rec] for rec in top_rec] )))\n with open(self.pred_file, 'w') as f:\n f.write('%s\\n' % ('\\n'.join(pred_out)))", "def write_features_to_csv(pairs, features, filename):\n\tids = []\n\n\tfor pair in pairs:\n\t\tids.append(pair.id)\n\n\tfeatures_dataframe = pd.DataFrame(features)\n\tfeatures_dataframe.insert(0, column=\"ID\", value=ids)\n\tfeatures_dataframe.to_csv(filename, index=False)", "def log_history_to_csv_file(metaparams, history, csv_file):\n \n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for i in range(len(history.history['loss'])):\n training_perfs = [i+1, 
history.history['loss'][i], history.history['acc'][i], history.history['val_loss'][i], history.history['val_acc'][i]]\n csv_writer.writerow(metaparams + training_perfs)\n return;", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def write_output(series, filename):\n\n logging.info('Writing output')\n\n df = series.reset_index()\n\n df.columns = ['subject_id', 'classification']\n\n df.to_csv(filename, index=False)", "def save(self, data, outpath):\n data.to_csv(outpath)", "def export_to_pairwise_csv(request, token, project):\n from appraise.local_settings import EXPORT_TOKEN\n if not token == EXPORT_TOKEN:\n return HttpResponseForbidden()\n \n annotation_project = get_object_or_404(Project, name=project)\n \n queryset = RankingResult.objects.filter(item__hit__completed=True)\n\n results = [u'srclang,trglang,srcIndex,segmentId,judgeId,' \\\n 'system1Id,system1rank,system2Id,system2rank,rankingID']\n \n for result in queryset:\n if isinstance(result, RankingResult):\n if result.item.hit.project_set.filter(id=annotation_project.id):\n current_csv = result.export_to_pairwise_csv()\n if current_csv is None:\n continue\n results.append(current_csv)\n \n export_csv = u\"\\n\".join(results)\n export_csv = export_csv + u\"\\n\"\n return HttpResponse(export_csv, mimetype='text/plain')", "def dwn_saved_result_csv(request):\n source_id = request.GET.get('source_id')\n data = []\n objs = ExtractedRelation.objects.filter(source=source_id)\n s = Source.objects.filter(source_id=source_id)[0]\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, s.source, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def save_csv_file(votes: dict) -> None:\r\n with open(\"votingList.csv\", \"w\", newline=\"\") as csvfile:\r\n writer = csv.writer(csvfile)\r\n writer.writerow([\"First Name\", \"Last Name\", \"Vote\"])\r\n for vote in votes.keys():\r\n entry = votes[vote]\r\n fst, snd = vote.split()\r\n writer.writerow([fst, snd, entry])", "def write_pred_kaggle_file(cls, outfname, speech):\n yp = cls.predict(speech.test_doc_vec)\n labels = speech.le.inverse_transform(yp)\n f = codecs.open(outfname, 'w')\n f.write(\"FileIndex,Category\\n\")\n for i in range(len(speech.test_fnames)):\n fname = speech.test_fnames[i]\n f.write(fname + ',' + labels[i] + '\\n')\n f.close()", "def save_csv_data(representatives, votes, house, session):\n df = pd.DataFrame({'Representatives': representatives})\n for bill in votes:\n vote_record = []\n for person in representatives:\n if person in bill['vote_yea']:\n vote_record.append(1)\n elif person in bill['vote_nay']:\n 
vote_record.append(0)\n else:\n vote_record.append(2)\n df[bill['bill']] = vote_record\n df.to_csv(os.path.join(default_path, \"voting\", house+str(session)+'_voting.csv'))", "def save_submission(results, file_name='submission.csv'):\n submission_path = path.join('..', 'output', file_name)\n results.to_csv(submission_path)", "def write(self, predictions, filename):\n driver = self.dataset.GetDriver()\n dst_ds = driver.CreateCopy(filename, self.dataset)\n\n prediction_array = np.zeros_like(self.segmentation)\n for prediction, y, x in predictions:\n prediction_array[y:y + self.size, x:x + self.size] = prediction\n\n # Overwrite the raster band with the predicted labels\n band = dst_ds.GetRasterBand(1)\n band.WriteArray(prediction_array)", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def output_predictions(pipeline):\n ##### Write code here #######\n X_train, y_train_true = load_data_file(TRAIN_FILE)\n X_dev, y_dev_true = load_data_file(DEV_FILE)\n X_test, y_test_true = load_data_file(TEST_FILE)\n\n #train pipeline with dev and train file\n pipeline.fit(X=X_train, y=y_train_true)\n pipeline.fit(X=X_dev, y=y_dev_true)\n\n y_pred_test = pipeline.predict(X=X_test)\n\n df = pd.DataFrame(y_pred_test)\n with open('predictions.tsv', 'w'):\n df.to_csv('predictions.tsv', sep='\\t', index=False, header=False)\n ##### End of your work ######", "def dwn_analysis_csv(request):\n data = []\n for i in results:\n data.append((i['sentence'], i['head'], i['tail'], i['pred_relation'], i['sent'], i['conf']))\n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))", "def write_to_csv(self, log_dir, run_dir, hmc=False):\n _, run_str = os.path.split(run_dir)\n avg_data = {\n 'log_dir': log_dir,\n 'run_dir': run_str,\n 'hmc': hmc,\n }\n\n for key, val in dict(sorted(self.data.items())).items():\n tensor = tf.convert_to_tensor(val)\n arr, steps = therm_arr(tensor.numpy(), therm_frac=0.2)\n if 'steps' not in avg_data:\n avg_data['steps'] = len(steps)\n avg_data[key] = np.mean(arr)\n\n # avg_data[key] = tf.reduce_mean(arr)\n\n avg_df = pd.DataFrame(avg_data, index=[0])\n outdir = os.path.join(BASE_DIR, 'logs', 'GaugeModel_logs')\n csv_file = os.path.join(outdir, 'inference.csv')\n head, tail = os.path.split(csv_file)\n io.check_else_make_dir(head)\n io.log(f'Appending inference results to {csv_file}.')\n if not os.path.isfile(csv_file):\n avg_df.to_csv(csv_file, header=True, index=False, mode='w')\n else:\n avg_df.to_csv(csv_file, header=False, index=False, mode='a')", "def write_results(results):\n fields = results[0].keys()\n with open('results.csv', 'w') as f:\n dw = csv.DictWriter(f, fieldnames=fields, delimiter='|')\n dw.writer.writerow(list(dw.fieldnames))\n dw.writerows(results)", "def outputFunc(filename, resultList):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n 
try:\n writer = csv.writer(f)\n for i in range(len(resultList)):\n print resultList[0]\n writer.writerow(resultList[i])\n \n finally:\n f.close()", "def submission(test_ids, pred_test, file_name):\n pred_test[pred_test < 0] = 0\n\n val_pred_df = pd.DataFrame(data={'fullVisitorId': test_ids,\n 'predictedRevenue': pred_test})\n\n val_pred_df = val_pred_df.groupby('fullVisitorId').sum().reset_index()\n\n val_pred_df.columns = ['fullVIsitorId', 'predictedLogRevenue']\n val_pred_df['predictedLogRevenue'] = val_pred_df['predictedLogRevenue']\n val_pred_df.to_csv('submission/'+file_name, index=False)", "def save_results(self, results, file_name, file_type):\n if file_type == 'csv':\n csv_filename = '{}.csv'.format(file_name)\n\n with open(csv_filename, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerows(results)", "def write_to_file(test_data: pd.DataFrame, estimator: Estimator,\n file_name: str):\n\n # we only need x_test\n x_test, y_test = estimator.split_model_data(test_data)\n\n # have the estimator return its predictions\n predictions = estimator.forecast(x_test)\n\n if predictions is None:\n sys.stdout.write(f\"Estimator returned no forecast,\"\n f\" has it been trained?\\n\")\n raise RuntimeError\n\n # slice the timestamps from given dataframe\n timestamps = test_data.loc[:, test_data.columns[0]].to_numpy()\n timestamps = np.array(timestamps, dtype='datetime64[s]')\n\n with open(file_name, mode=\"w\", newline=\"\") as output_file:\n # initialize csv writer\n prediction_writer = csv.writer(output_file,\n delimiter=\",\",\n quotechar=\"'\",\n quoting=csv.QUOTE_MINIMAL)\n\n # add header line\n prediction_writer.writerow([test_data.columns[0],\n test_data.columns[1]])\n\n #\n for i in range(len(timestamps)):\n timestamp = str(timestamps[i])\n prediction = str(round(predictions[i], 6))\n prediction_writer.writerow([timestamp, prediction])\n\n return", "def write_csv(image_names, image_classes, filename):\n with open(filename, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['filename', 'label'])\n writer.writerows(zip(image_names, image_classes))", "def write_classifier_output(location, folds, labels, predictions, class_probs, names=None):\n with open(os.path.join(location, '-'.join([\"classifier\", \"fold\", \"predictions\"]) + '.txt'), 'w') as out_file:\n for fold in range(folds):\n out_file.write(\"fold \" + str(fold+1) + ':\\n')\n out_file.write(\"{:50} {:<12} {:<12} {:<9} {:<9}\\n\".format(\"recording\", \"prediction\", \"label\", \"class 0\",\n \"class 1\"))\n fold_labels, fold_predictions, fold_class_probs = labels[fold], predictions[fold], class_probs[fold]\n\n if names is not None and len(names) != 0:\n fold_names = np.hstack(names[fold])\n else:\n fold_names = len(fold_predictions) * ['']\n\n for pred_lab_tuple in zip(fold_names, fold_predictions, fold_labels, fold_class_probs[:, 0],\n fold_class_probs[:, 1]):\n (name, pred, label, prob1, prob2) = pred_lab_tuple\n out_file.write(\"{:50} {:<12} {:<12} {:<9.2f} {:<9.2f}\\n\".format(name, pred, label, prob1, prob2))\n out_file.write('\\n')", "def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), 
indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n vector.to_csv(path_or_buf=\"vectorfile.csv\")", "def predict(classifier, data):\n print(\"Beggining to classify data\")\n results = classifier.predict(data)\n results = pd.DataFrame(results)\n results.index += 1\n results.to_csv(\"out/results.csv\", header=[\"Label\"], index=True, index_label=[\"ImageId\"])\n print(\"Finished classifying data\")", "def write_predictions(self, predictions, file_path=None, is_dict=True, pycm_obj=None):\n\n try:\n super(SequenceClassification, self).write_predictions(\n predictions, file_path=file_path, is_dict=is_dict\n )\n except AttributeError:\n # TODO: Need to Fix\n model_base = ModelBase()\n model_base._log_dir = self._log_dir\n model_base._train_counter = self._train_counter\n model_base.training = self.training\n model_base.write_predictions(predictions, file_path=file_path, is_dict=is_dict)\n\n data_type = \"train\" if self.training else \"valid\"\n\n if pycm_obj is not None:\n stats_file_path = f\"predictions-{data_type}-{self._train_counter.get_display()}-stats\"\n pycm_obj.save_csv(str(Path(self._log_dir) / \"predictions\" / stats_file_path))\n\n confusion_matrix_file_path = (\n f\"predictions-{data_type}-{self._train_counter.get_display()}-confusion_matrix\"\n )\n cls_utils.write_confusion_matrix_to_csv(\n str(Path(self._log_dir) / \"predictions\" / confusion_matrix_file_path), pycm_obj\n )", "def exportFoldFile(vectors, authors, fileName):\n with open(fileName, \"w\") as fFile:\n for idv, vec in enumerate(vectors):\n [fFile.write(str(val)+',') for val in vec]\n fFile.write(authors[idv] + '\\n')" ]
[ "0.7925112", "0.75827205", "0.75710183", "0.75710183", "0.7489846", "0.74501824", "0.73730016", "0.7322112", "0.70706344", "0.6982982", "0.69460446", "0.6911982", "0.6833308", "0.6815102", "0.67553407", "0.6745336", "0.6745336", "0.6745336", "0.6745336", "0.6745336", "0.6745336", "0.6735976", "0.67324305", "0.67095983", "0.6690879", "0.6612334", "0.6602958", "0.6601568", "0.65663135", "0.6558502", "0.65581733", "0.65417206", "0.6535028", "0.6519499", "0.6509373", "0.6495915", "0.648198", "0.64151376", "0.64099944", "0.6405993", "0.6395074", "0.6384059", "0.6362279", "0.6359811", "0.6357627", "0.63564426", "0.634721", "0.63367504", "0.62852734", "0.6285116", "0.6259409", "0.6254705", "0.6253172", "0.6248515", "0.62415016", "0.6219533", "0.62152696", "0.619938", "0.6195651", "0.6194581", "0.6192136", "0.6189015", "0.61690325", "0.6163753", "0.61608064", "0.61601233", "0.6150573", "0.6143036", "0.6140446", "0.6136853", "0.6133349", "0.613281", "0.6123718", "0.6120118", "0.61128205", "0.611245", "0.6111941", "0.6109166", "0.6104979", "0.6102238", "0.6101714", "0.6097939", "0.6094107", "0.60673153", "0.6063369", "0.60576755", "0.6049206", "0.60410124", "0.60350573", "0.6034096", "0.60279715", "0.60265666", "0.6022835", "0.6018383", "0.6015225", "0.5999805", "0.5997553", "0.5989992", "0.5979374", "0.59782135" ]
0.78001994
1
initialization routine for Chiplot, sets class variables
def __init__(self, xdata = list(), ydata = list(), filename = None, projection = ''):
    dlog('in chiplot initialization')
    self.xdata = xdata
    self.ydata = ydata
    self.xmax = None
    self.ymax = None
    self.xmin = None
    self.ymin = None
    self.increment = 0
    self.filename = filename
    self.smv = ''
    self.points = 0
    self.projection = projection
    # process data if given
    lastx = -1
    increment = 0
    dlog('len of x'+str(len(xdata)))
    dlog('len of y'+str(len(ydata)))
    for i in range(0,len(ydata)):
        if(self.xmax == None):
            self.xmax = xdata[i]
            self.xmin = xdata[i]
            self.ymax = ydata[i]
            self.ymin = ydata[i]
        # determine ranges for axes
        if( xdata[i] > self.xmax):
            self.xmax = xdata[i]
        elif( xdata[i] < self.xmin):
            self.xmin = xdata[i]
        if( ydata[i] > self.ymax):
            self.ymax = ydata[i]
        elif( ydata[i] < self.ymin):
            self.ymin = ydata[i]
        # make sure that the chiplot increments steadily upward or downward
        if(xdata[i] < lastx and increment > 0):
            dlog('Error: Chiplot does not increment its data points normally', 'l')
        elif(xdata[i] > lastx and increment < 0):
            dlog('Error: Chiplot does not increment its data points normally', 'l')
        elif(lastx != -1 and increment == 0):
            increment = xdata[i] - lastx
            dlog('incrementing by: '+str(increment), 'd')
        lastx = xdata[i]
        # add point to storage
        self.points = self.points + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(cls):", "def initialise(self):", "def _init(self):\n pass", "def init(self) -> None:", "def __init_accessors (self):\n self.colors = ay.utils.Colors\n self.layout = Layout(self.seed)\n self.shapes = Shapes", "def init(self):", "def init(self):", "def init(self) -> None:\n ...", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init():", "def init_instance_attrs(self):\n super(CoconutShell, self).init_instance_attrs()\n self.compile = CoconutCompiler()", "def _init(self):", "def __init__():", "def initialize(self, cwrap):\n pass", "def __init__(self):\n ChipData.ChipData.__init__(self)", "def initialize(self, **kwargs):", "def initialize(self):\n\t\tpass", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def _init(self):\n raise NotImplementedError", "def setup_class(self):\n class SubCosmology(Cosmology):\n\n H0 = Parameter(unit=u.km / u.s / u.Mpc)\n Tcmb0 = Parameter(unit=u.K)\n\n def __init__(self, H0, Tcmb0=0*u.K, name=None, meta=None):\n super().__init__(name=name, meta=meta)\n self._H0 = H0\n self._Tcmb0 = Tcmb0\n\n self.cls = SubCosmology\n self.cls_args = (70 * (u.km / u.s / u.Mpc), 2.7 * u.K)\n self.cls_kwargs = dict(name=self.__class__.__name__, meta={\"a\": \"b\"})", "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def initialize(self): \r\n pass", "def __init__(self):\n\n self.plugboard = None\n self.rotors = []\n self.reflector = None", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def __init__(self):\n\n #: Dict[str, Any]: Experiment metadata\n self.__experiment_metadata = None\n\n #: List[CurveData]: Processed experiment data set.\n self.__processed_data_set = list()\n\n #: Backend: backend object used for experimentation\n self.__backend = None\n\n # Add expected options to instance variable so that every method can access to.\n for key in self._default_options().__dict__:\n setattr(self, f\"__{key}\", None)\n\n # Add fixed parameters to instance variable so that every method can access to.\n for key in self.__fixed_parameters__:\n setattr(self, f\"__{key}\", None)", "def init(self):\n raise NotImplementedError", "def init(self):\n raise NotImplementedError", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def __init__ (self):\n pass", "def __init__(self, nc_measure: NCMeasure, clf) -> None:\n self._nc_measure = nc_measure # Non-conformity measure\n self._clf = clf # Classifier\n self._cal_l = None # Calibration labels\n self._cal_a = None # Calibration alphas", "def __init__(self):\n self._tyrannosaurus = []\n self._triceratops = []", "def __init__(self):\n self.TECRDB_compounds_data_dict = {}\n self.TECRDB_compounds_pH7_species_id_dict = {}\n self.TECRDB_compounds_least_H_sid_dict = {}\n self.get_TECRDB_compounds_data()", "def __init__(self):\n super().__init__(\"ccx\", 3, [])", "def __init__(self):\n self._read_calibration_data()\n self.set_oversamplings_and_mode(\n HumidityOversampling.x08,\n TemperatureOversampling.x08,\n PressureOversampling.x16,\n SensorMode.Normal)\n self.set_config(\n InactiveDuration.ms1000,\n FilterCoefficient.fc04)", 
"def __init__(self, coord_sys='Cartesian'):\r\n self.name = None \r\n self.chem = None\r\n self.nSpecies = None\r\n self.conc = np.array([0])\r\n self.Kw = None\r\n self.D = None \r\n self.x_coord = None\r\n self.y_coord = None\r\n self.dx = None\r\n self.dy = None\r\n self.dz = None\r\n self.coord_sys = coord_sys", "def __init__(self, set_args, load_sensor_names,\n sensor_names, \n cnt_preprocessors, marker_def):\n self.__dict__.update(locals())\n del self.self\n if self.load_sensor_names == 'all':\n self.load_sensor_names = None", "def __init__(self):\n super().__init__('drvr_06')\n self.comp = SimComp_6()", "def x_init(self):\n pass", "def __init__(self, make, model, year):\r\n super().__init__(make, model, year)\r\n self.battery_size = 70\r\n # self.autopilot = autopilot\r", "def setup_class(self):\n self.iqcalc = iqcalc_astropy.IQCalc(logger=self.logger)", "def initialize(self) -> None:\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def initialise(self, **kwargs):\n pass", "def __init__(self):\n self.__dataset = None", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def initialize(self, *args, **kwargs):", "def init(self, parameters):\n pass", "def _set_init(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n ## Auxiliar information\n self.ks = None\n self.iss = [0]\n ## Class structural information\n self._setted = False\n self._constant_rel_pos = False\n self.staticneighs = None\n self.staticneighs_set = None", "def __init__ (self) :", "def __init__(self, connection_loc, tags_tsdata,\n dummy_var_no):\n self.init_system(connection_loc, tags_tsdata)\n self.removedummyvars(dummy_var_no)\n self.addforwardscale()\n self.addbackwardscale()", "def do_init(self):\n\n pass", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys", "def __init__(self, coresys: CoreSys):\n self.coresys = coresys", "def initialize(self,*args,**kwargs):\n self.__instrumentID = c_uint32(0) \n self.__numInstruments = c_uint32()\n self.__nbrOfChannels = c_uint32()\n self.__nbrADCBits = c_uint32()\n self.__temperature = c_int32()\n self.__time_us = c_double()\n\n self.loadDLLs(**kwargs) # Load the different DLLs or DLL based modules\n self.reinit() # init or reinit the board\n self.createDictAndGlobals() # create dictionaries and global variables\n self.nbrOfChannels=int(self.__nbrOfChannels.value) # duplicate self.nbrOfChannels in a Python type variable \n self.getInitialConfig()", "def init(self, *args, **kwds):\n pass", "def initialize(self):\n pass", "def __init__(self, canvas):\r\n\r\n # Initialize attributes\r\n self.canvas = canvas\r\n self.fig = canvas.fig\r\n self.units = None\r\n self.cb = None\r\n self.cb_bt = None\r\n self.cb_gga = None\r\n self.cb_vtg = None\r\n self.bt = None\r\n self.gga = None\r\n self.vtg = None\r\n self.hover_connection = None\r\n self.annot = None", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Bernoulli'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n 
self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def init():\n pass", "def __init__(self):\n self.tape_tag = None\n self.independentVariableShapeList = []\n self.dependentVariableShapeList = []", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self):\r\n\r\n self.Helpers = Helpers(\"Movidius\")\r\n self.confs = self.Helpers.confs\r\n\r\n self.classes = []\r\n self.ncsGraph = None\r\n self.ncsDevice = None\r\n self.reqsize = None\r\n\r\n self.mean = 128\r\n self.std = 1 / 128\r\n\r\n #mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)\r\n\r\n self.Helpers.logger.info(\"Movidius class initialization complete.\")", "def initialize(self, **kwargs: Any) -> None:\n pass", "def __init__(self, Q_sys, C_sys, C_stock):\n self._Q_sys = Q_sys\n self._C_sys = C_sys\n self._C_stock = C_stock", "def __init__(self):\n self._create_options()\n self._create_sections()", "def initialize(self, *args, **kwargs):\n pass", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Geometric'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def initialize(self):\n pass # pragma: no cover", "def __init__(self, verbose):\n self.modules = maus_cpp.globals.get_monte_carlo_mice_modules()\n self.verbose = verbose", "def __init__(self):\n parameters_list = []\n self.config_dict = self.open_config(parameters_list)\n\n # Define defaults\n self.disc_gt = 0.0\n self.disc_out = 0.0", "def __init__(self):\n self.machines = {}\n self.configs = {}\n self.systems = {}\n self.jobs = {}\n self.benchmarks = {}\n self.projects = {}", "def __init__(self):\n\n self.logger = utils.get_logger()\n\n # set constants\n constants = models.get_asset_dicts('preferences')\n for key, value in constants.items():\n setattr(self, key, value)", "def _plot_init(self):\n pass", "def _plot_init(self):\n pass", "def __init__(self, **kwds):\n raise NotImplementedError" ]
[ "0.705675", "0.69822073", "0.69724643", "0.69628584", "0.6953671", "0.6945866", "0.6945866", "0.69433916", "0.69258225", "0.69258225", "0.69258225", "0.69258225", "0.69258225", "0.69258225", "0.69258225", "0.69258225", "0.6895559", "0.68572617", "0.6853776", "0.6848816", "0.6839878", "0.6829869", "0.68288934", "0.6766515", "0.6750715", "0.6750715", "0.6750715", "0.6750715", "0.6737793", "0.6736068", "0.6721077", "0.6707387", "0.6690871", "0.66819334", "0.66819334", "0.66535485", "0.6648761", "0.6648761", "0.6632673", "0.6632673", "0.6632673", "0.6632673", "0.6632673", "0.6617402", "0.66100013", "0.6595159", "0.65733564", "0.65561295", "0.65560704", "0.65447646", "0.65409595", "0.6534781", "0.653289", "0.65254575", "0.6523842", "0.6499733", "0.6482943", "0.6482943", "0.6482943", "0.6480662", "0.6480362", "0.6470308", "0.6468605", "0.6466192", "0.64656687", "0.6458992", "0.6444356", "0.6443393", "0.6435763", "0.6435763", "0.6435763", "0.6435763", "0.6435763", "0.6435763", "0.6435763", "0.6435763", "0.6435763", "0.643434", "0.64315957", "0.6430917", "0.64268726", "0.64260274", "0.6418523", "0.6414152", "0.6414091", "0.6414091", "0.6407498", "0.64069986", "0.64067775", "0.64064175", "0.6404379", "0.6403149", "0.64007246", "0.63933915", "0.6391665", "0.6388414", "0.6385806", "0.63852435", "0.63852435", "0.638216" ]
0.64347374
77
This function checks whether a text contains contractions or not. In case a contraction is found, the corrected value from the dictionary is returned.
def replace_contractions(self, text, lower=False):
    # replace words with contraction according to the contraction_dict
    if lower:
        contraction_dict = self.contraction_dict_lower
    else:
        contraction_dict = self.contraction_dict
    if text.strip() in contraction_dict.keys():
        text = contraction_dict[text.strip()]
    # replace words with "'ve" to "have"
    matches = re.findall(r'\b\w+[\'`´]ve\b', text)
    if len(matches) != 0:
        text = re.sub(r'[\'`´]ve\b', " have", text)
    # replace words with "'re" to "are"
    matches = re.findall(r'\b\w+[\'`´]re\b', text)
    if len(matches) != 0:
        text = re.sub(r'[\'`´]re\b', " are", text)
    # replace words with "'ll" to "will"
    matches = re.findall(r'\b\w+[\'`´]ll\b', text)
    if len(matches) != 0:
        text = re.sub(r'[\'`´]ll\b', " will", text)
    # replace words with "'m" to "am"
    matches = re.findall(r'\b\w+[\'`´]m\b', text)
    if len(matches) != 0:
        text = re.sub(r'[\'`´]m\b', " am", text)
    # replace words with "'d" to "would"
    matches = re.findall(r'\b\w+[\'`´]d\b', text)
    if len(matches) != 0:
        text = re.sub(r'[\'`´]d\b', " would", text)
    # replace all "'s" by space
    matches = re.findall(r'\b\w+[\'`´]s\b', text)
    if len(matches) != 0:
        text = re.sub(r'[\'`´]s\b', " ", text)
    return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_contractions(text):\r\n return contractions.fix(text)", "def _replace_contractions(text):\n return contractions.fix(text)", "def replace_contractions(text):\n return contractions.fix(text)", "def replace_contractions(text):\n return contractions.fix(text)", "def expand_contractions(text):\r\n text = re.sub(\r\n r\"(\\b)([Aa]re|[Cc]ould|[Dd]id|[Dd]oes|[Dd]o|[Hh]ad|[Hh]as|[Hh]ave|[Ii]s|[Mm]ight|[Mm]ust|[Ss]hould|[Ww]ere|[Ww]ould)n't\",\r\n r\"\\1\\2 not\", text)\r\n text = re.sub(r\"(\\b)([Hh]e|[Ii]|[Ss]he|[Tt]hey|[Ww]e|[Ww]hat|[Ww]ho|[Yy]ou)'ll\", r\"\\1\\2 will\", text)\r\n text = re.sub(r\"(\\b)([Tt]hey|[Ww]e|[Ww]hat|[Ww]ho|[Yy]ou)'re\", r\"\\1\\2 are\", text)\r\n text = re.sub(r\"(\\b)([Ii]|[Ss]hould|[Tt]hey|[Ww]e|[Ww]hat|[Ww]ho|[Ww]ould|[Yy]ou)'ve\", r\"\\1\\2 have\", text)\r\n\r\n text = re.sub(r\"(\\b)([Cc]a)n't\", r\"\\1\\2n not\", text)\r\n text = re.sub(r\"(\\b)([Ii])'m\", r\"\\1\\2 am\", text)\r\n text = re.sub(r\"(\\b)([Ll]et)'s\", r\"\\1\\2 us\", text)\r\n text = re.sub(r\"(\\b)([Ww])on't\", r\"\\1\\2ill not\", text)\r\n text = re.sub(r\"(\\b)([Ss])han't\", r\"\\1\\2hall not\", text)\r\n text = re.sub(r\"(\\b)([Yy])(?:'all|a'll)\", r\"\\1\\2ou all\", text)\r\n\r\n return text", "def replaceContractions(self, text):\n\t\treturn contractions.fix(text)", "def preProcess(text):\n\ttext = text.lower() # lower case the text\n\t# Q4 replace the word with expanded contractions\n\tfor k,v in general_contraction.items():\n\t\tif k in text.split():\n\t\t\ttext = text.replace(k,v)\n\t# Q4 remove speacial char including all puncuattions and replace it with a space\n\ttext = re.sub('[^A-Za-z0-9]+',' ',text) \n\t# tokenise\n\ttokens = text.split()\n\t# stop word removal\n\ttokens = [w for w in tokens if w not in stopwords ]\n\t# Q4 Stemming\n\ttokens = [str(porter.stem(w)) for w in tokens]\n\t# if word is non-english return its english form # too much time-complexity\n\t# tokens = [porter.stem(w) if porter.stem(w) in set(words.words()) else w for w in tokens ]\n\t# for words having digits such as 12gb, 1st, etc expanding the token list\n\tfor k in tokens:\n\t\tif len(k) >2 and re.match(r'[0-9]+',k):\t\t\t\n\t\t\tif len(k) >2 and not k.isdigit():\n\t\t\t\tl = re.split(r'(\\d+)',k)\n\t\t\t\tl = [w for w in l if w is not '' ]\n\t\t\t\tif l and len(l) <= 3:\n\t\t\t\t\tfor i in l:\n\t\t\t\t\t\tif i in digit_contractions.keys():\n\t\t\t\t\t\t\tl = list(map(lambda b: b.replace(i,digit_contractions[i]), l))\n\t\t\t\t\ttokens.remove(k)\n\t\t\t\t\ttokens = tokens+l\n\t\t\t\telse:\n\t\t\t\t\ttokens.remove(k)\n\tfor k,v in digit_contractions.items():\n\t\tif k in tokens:\n\t\t\tif tokens[tokens.index(k)-1].isdigit():\t\n\t\t\t\ttokens = list(map(lambda b: b.replace(k,v), tokens))\n\t# remove tokens of size less than 2\n\ttokens = [t for t in tokens if len(t) > 2]\n\treturn tokens", "def replace_contractions(sample):\n sample[\"full_text\"] = contractions.fix(sample[\"full_text\"])\n return sample", "def recommended_correction(text):\n tool = language_check.LanguageTool('en-US')\n matches = tool.check(text)\n correction = language_check.correct(text, matches)\n return correction", "def expand_contractions(text):\n text = list(cont.expand_texts([text], precise=True))[0]\n return text \n\n text = \"\"\"three cups of coffee\"\"\"\n doc = nlp(text)\n tokens = [w2n.word_to_num(token.text) if token.pos_ == 'NUM' else token for token in doc]", "def contractions_remove(self,sentence):\n sentence = self.contractions_re.sub(lambda mo: CONTRACTIONS[mo.string[mo.start():mo.end()]], sentence)\n return 
sentence", "def ocr_correction(token):", "def corrected(text):\n # Init SpellChecker\n spell = SpellChecker(language='fr', distance=1, case_sensitive=False)\n # Get misspellings and corrections\n text_splitted = text.split()\n misspelled = spell.unknown(text_splitted)\n correction = {word:spell.correction(word) for word in misspelled}\n\n # Replace misspellings in text\n for k,v in correction.items():\n text = text.replace(k,v)\n return text", "def removeContractions(self, text=None):\n\n\t\tif type(text) != type(str):\n\t\t\ttext = \" \".join(text)\n\n\t\tfor key in self.norm_words:\n\t\t\ttext = text.replace(key, self.norm_words[key])\n\n\t\ttext = text.split(\" \")\n\n\t\treturn text", "def correct(self, text):\n with self._get_searcher() as searcher:\n query = self._get_query_parser().parse(unicode(text))\n correction = searcher.correct_query(query, unicode(text))\n if correction.query != query:\n formatter = whoosh.highlight.HtmlFormatter()\n return correction.format_string(formatter)\n return None", "def replace_contractions(self):\n self.text = contractions.fix(self.text)\n return self", "def text_dict(text):\n hey_words = {'hello', 'hey', 'hi', 'heya', 'hiya', 'hai'}\n fine_words = {'fine', 'good', 'splendid', 'amazing', 'well', 'lovely', 'cool'}\n bye_words = {'goodbye', 'bye', 'adios'}\n thank_words = {'thanks', 'thank'}\n notes_words = {'notes', 'note'}\n tokens = set(text.split())\n\n if len(notes_words.intersection(tokens)) > 0:\n text = 'Ok sure. Please tell me what to note down.'\n\n elif len(fine_words.intersection(tokens)) > 0:\n text = '''Good to hear that. How may I help you?'''\n\n elif len(hey_words.intersection(tokens)) > 0:\n text = '''Hi Shivek. How are you doing today?'''\n\n elif len(thank_words.intersection(tokens)) > 0:\n text = '''You are welcome. Can I help you with anything else?'''\n\n elif len(bye_words.intersection(tokens)) > 0 or 'see you' in text or 'see ya' in text or 'no thank' in text:\n text = '''Ok goodbye!'''\n\n\n else:\n text = '''Sorry, I didn't get you. Can you please repeat?'''\n return text", "def correction():\n text = request.args.get('text', '')\n text = TextBlob(text)\n return jsonify(text=unicode(text.correct()))", "def correct(search_key, text, strictness):\n\n text_copy = copy.deepcopy(text)\n words = text.split()\n for word in words:\n similarity = SequenceMatcher(None, word, search_key)\n if similarity.ratio() > strictness:\n text_copy = text_copy.replace(word, search_key)\n return text_copy", "def check_for_correction(dummy_arg):\n #space_indexes = []\n #new_space_indexes = []\n #punct_dict = {}\n\n # Get current document and attributes for doc\n doc = XSCRIPTCONTEXT.getDocument()\n selection_supplier = doc.getCurrentController()\n indexAccess = selection_supplier.getSelection()\n #count = indexAccess.getCount() # don't really need this right now. 
Only loop once..\n\n # get cursor to write corrected word highlighted in blue\n text = doc.Text\n vis_cursor = doc.getCurrentController().getViewCursor() # visible cursor in OpenOffice\n nvis_cursor= vis_cursor.getText().createTextCursorByRange(vis_cursor) # not visible cursor place at same place as visible\n \n # set str and txt for use\n highlighted_txt = indexAccess.getByIndex(0)\n highlighted_str = highlighted_txt.getString()\n \n # strip punctuation from string\n punctuation = set(string.punctuation)\n '''\n for letter in highlighted_str:\n if letter == ' ':\n sp_index = highlighted_str.index(letter)\n space_indexes.append(sp_index + len(space_indexes))\n highlighted_str = highlighted_str.replace(letter,'',1)\n \n for index in space_indexes:\n highlighted_str = highlighted_str[:index] + ' ' + highlighted_str[index:]\n\n \n for letter in highlighted_str:\n if letter in punctuation:\n punc_index = highlighted_str.index(letter)\n punct_dict[letter] = punc_index\n highlighted_str = highlighted_str.replace(letter,'',1)\n '''\n highlighted_str = ''.join(ch for ch in highlighted_str if ch not in punctuation) \n highlighted_str_arr = highlighted_str.split()\n\n corrected_sent = highlighted_str # correct sentence to be displayed at end\n\n # check for correction for each word highlighted\n for word in highlighted_str_arr:\n\n if len(highlighted_str) == 0:\n return None\n\n # call make_correction helper function to correct highlighted word\n else:\n correction = make_correction(word)\n \n # make sure new correction word is present\n if correction:\n # get parent window from document\n parentwin = doc.CurrentController.Frame.ContainerWindow\n # make window\n correction_query_str = word + \" => \" + correction # show old word => new word\n prompt = MessageBox(parentwin, correction_query_str, \"Correction:\", MESSAGEBOX, BUTTONS_YES_NO)\n \n # if yes button pressed, replace word\n if prompt == 2:\n # replace wrong word with corrected word\n corrected_sent = corrected_sent.replace(word, correction)\n '''\n for letter in corrected_sent:\n if letter == ' ':\n sp_index = corrected_sent.index(letter)\n new_space_indexes.append(sp_index + len(new_space_indexes))\n corrected_sent = corrected_sent.replace(letter,'',1)\n \n for index in new_space_indexes:\n corrected_sent = corrected_sent[:index] + ' ' + corrected_sent[index:]\n \n \n punc_location = {}\n for key in punct_dict:\n p_index = punct_dict[key]\n for s_index in space_indexes:\n if abs(p_index-s_index) == 1:\n location_index = space_indexes.index(s_index)\n if p_index-s_index > 0:\n punc_location[key] = new_space_indexes[location_index+1]\n else:\n punc_location[key] = new_space_indexes[location_index-1]\n\n\n for key in punc_location:\n index = punc_location[key]\n corrected_sent = key\n #corrected_sent = corrected_sent[:index] + key + corrected_sent[index:]\n ''' \n #highlighted_txt.setString(highlighted_str)\n highlighted_txt.setString(corrected_sent) # replace old word with nothing", "def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n logger.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n logger.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text", "def get_final_text(config: configure_finetuning.FinetuningConfig, pred_text,\n orig_text):\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for i, c in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return ns_text, dict(ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=config.do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if config.debug:\n utils.log(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if config.debug:\n utils.log(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if config.debug:\n utils.log(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if config.debug:\n utils.log(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text", "def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n logger.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n logger.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n logger.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text", "def decipher(self, text, code):\n # remove capitalization to search for words\n text.lower()\n # remove any punctuation\n text = text.translate(str.maketrans('', '', string.punctuation))\n # get words to search for in text\n secret_words = code.keys()\n # check if text contains any secret words and collect their boltzman values\n boltzmanns = []\n for word in text.split():\n if word in secret_words:\n boltzmanns.append(code[word])\n # return None if no code words in text\n if len(boltzmanns) == 0:\n return None\n # return boltzmann average value for code words present\n else:\n avg = sum(boltzmanns) / float(len(boltzmanns))\n return avg", "def canned():\n return (next_phrase(\"we proceed as follows\") |\n (next_word('the') + \n first_word('result lemma theorem proposition corollary') +\n next_word('now').possibly() +\n next_word('follows')) |\n next_phrase('the other cases are similar') |\n (next_phrase('the proof is')+ first_word('obvious trivial easy routine'))).nil().expect('canned')", "def correct_case(token, corrections_map, structures):\n alt_case_mode = -1 # Most common variation\n if token[0].isupper():\n if sum(char.isupper() for char in token) > 2:\n alt_case_mode = 0 # Upper case variation\n else:\n alt_case_mode = 1 # Lower case variation with capital first letter\n\n corrected_case_map = {}\n for correct_word, score in corrections_map.items():\n if correct_word.find(\" \") != -1:\n words = correct_word.split(\" \")\n\n keys_left = find_correct_case(words[0], alt_case_mode, structures)\n keys_right = find_correct_case(words[1], alt_case_mode, structures)\n key = keys_left+\" \"+keys_right\n else:\n key = find_correct_case(correct_word, alt_case_mode, structures)\n\n # If the key already exists we keep the highest score\n if key in 
corrected_case_map.keys():\n old_score = corrected_case_map[key]\n corrected_case_map[key] = max(old_score, score)\n else:\n corrected_case_map[key] = score\n\n return corrected_case_map", "def decontracted(phrase):\r\n # specific\r\n phrase = re.sub(r\"won\\'t\", \"will not\", phrase)\r\n phrase = re.sub(r\"can\\'t\", \"can not\", phrase)\r\n # general\r\n phrase = re.sub(r\"n\\'t\", \" not\", phrase)\r\n phrase = re.sub(r\"\\'re\", \" are\", phrase)\r\n phrase = re.sub(r\"\\'s\", \" is\", phrase)\r\n phrase = re.sub(r\"\\'d\", \" would\", phrase)\r\n phrase = re.sub(r\"\\'ll\", \" will\", phrase)\r\n phrase = re.sub(r\"\\'t\", \" not\", phrase)\r\n phrase = re.sub(r\"\\'ve\", \" have\", phrase)\r\n phrase = re.sub(r\"\\'m\", \" am\", phrase)\r\n return phrase", "def clean_for_comparison(text):\n text = clean_text(text)\n text = clean_text_from_nonbasic_characters(text)\n return text", "def check_spellings(text):\n\n for word in vocabulary:\n text = correct(word, text, 0.7)\n return text", "def corrigir_doc(frase):\r\n\r\n if not frase or not (isinstance(frase, str) and all(x.isalpha() for x in frase.split())) or ' ' in frase:\r\n raise ValueError('corrigir_doc: argumento invalido')\r\n\r\n palavras, doc_corrigido = frase.split(' '), ''\r\n palavras = list(map(corrigir_palavra, palavras))\r\n\r\n while palavras:\r\n anagrama_teste = palavras.pop(0)\r\n if palavras:\r\n for word in palavras:\r\n if eh_anagrama(anagrama_teste, word) and anagrama_teste.lower() != word.lower():\r\n del palavras[palavras.index(word)]\r\n doc_corrigido += ' ' + anagrama_teste\r\n\r\n return doc_corrigido[1:]", "def get_exhaustive_text_correction_proposal(self, input_text):\n arr = []\n self.complexity=20\n prev = self.alpha\n self.alpha = 0.95 # temporary\n arr_i = 0\n\n with torch.no_grad():\n for text_chunk in tqdm(self._string_to_chunks(input_text)):\n self.oryginal_input_text = text_chunk\n self.input_text = text_chunk\n self._compute_exhaustive_outputs()\n\n for ix in range(self.input_size):\n token_id = self.input_ids[0][ix]\n token_obj = {}\n token_obj[\"name\"] = self.tokenizer.decode(token_id.item())\n token_obj[\"probability\"] = self.normalized_token_prob[ix]\n token_obj[\"oddballness\"] = self._get_oddballness_proba(token_obj[\"probability\"], self.probs[ix],\n alpha=self.alpha).item()\n arr.append(token_obj)\n\n self.input_text = self.oryginal_input_text\n self._compute_outputs()\n for ix in range(self.input_size):\n self.sorted_probs, self.sorted_indices = torch.sort(self.probs[ix - 1], descending=True)\n _, correction_indices = self._get_best_tokens(5)\n\n arr[arr_i + ix][\"corrections\"] = [self.tokenizer.decode(token_id.item()) for token_id in correction_indices]\n arr_i += self.input_size\n\n arr.pop()\n arr.pop(0)\n self._trim_bpe_space_artifact(arr)\n self.token_array = arr\n self.alpha = prev # temporary\n return arr", "def is_correction(self):\n # OK, go looking for RESENT style tags, assume it happens within first\n # 300 chars\n if RESENT.search(self.text[:300]):\n return True\n if self.bbb is None or not self.bbb:\n return False\n if self.bbb[0] in ['A', 'C']:\n return True\n return False", "def oracle(c):\n correct_arcs = get_arcs(c.sentence)\n if can_left_arc(c, correct_arcs):\n return Transition('la', c.sentence[c.stack[-1]].deprel)\n elif can_right_arc(c, correct_arcs):\n return Transition('ra', c.sentence[c.buffer[0]].deprel)\n else:\n return Transition('sh', '_')", "def clean_up(text_not_allowed, raw_string):\n trans_table = str.maketrans(dict.fromkeys(text_not_allowed))\n 
clean_result = raw_string.translate(trans_table)\n return clean_result", "def can_recept(self, text, *args, **kwargs):\n for each_cur in self.flat_norm.keys():\n if each_cur.lower() in text.lower():\n return True\n\n else:\n return False", "def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heuristic between\n # `pred_text` and `orig_text` to get a character-to-character alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \"\".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if verbose_logging:\n print(\"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if verbose_logging:\n print(\"Length not equal after stripping spaces: '%s' vs '%s'\" % (orig_ns_text, tok_ns_text))\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in tok_ns_to_s_map.items():\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if verbose_logging:\n print(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if verbose_logging:\n print(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text", "def test__same_text_correlation(self):\n \n _log.info('-'*80)\n \n # arrange \n text1 = \"love is rain as long story short\"\n text2 = text1\n\n dump_file = getInputFile(\"swiki_knowledge_output.xml\")\n parsed_file = getOutputFile(\"swiki_knowledge_output.parsed.xml\")\n #wdb_file = getOutputFile(\"swiki_knowledge_output.wdb\")\n\n articles = ['Rain', 'Love', 'Tree'] \n \n # act\n wn.make_dump(dump_file, articles, compress=False)\n wn.parse_dump(dump_file, parsed_file)\n db_wrapper = wn.build_database_wrapper(parsed_file, StopWordsStemmer([]))\n \n #self.addCleanup(os.remove, self.tmp_dump_file)\n \n comparer = SemanticComparer(db_wrapper)\n correlation = comparer.compare(text1, text2)\n _log.info(test_utils.get_texts_correlation_message(text1, text2, correlation))\n self.assertAlmostEqual(correlation, 1.0, msg=\"for same text correlation should be 1\")", "def caesar(action, input_text, shift_amount):\n\n transformed_text = ''\n\n # If the action is decode, make the shift_amount as negative\n if action == 'decode':\n shift_amount *= -1\n\n # Add/Subtract the shift value to get the index from alphabets list\n for alphabet in input_text:\n if alphabet in alphabets:\n index = alphabets.index(alphabet)\n new_index = index + shift_amount\n new_alphabet = alphabets[new_index]\n transformed_text += new_alphabet\n else:\n transformed_text += alphabet\n\n # Print the result\n print(f'The {action}d text is {transformed_text}')", "def calculate_cer(normalized_texts: List[str], transcript: str, remove_punct=False) -> List[Tuple[str, float]]:\n normalized_options = []\n for text in normalized_texts:\n text_clean = text.replace('-', ' ').lower()\n if remove_punct:\n for punct in \"!?:;,.-()*+-/<=>@^_\":\n text_clean = text_clean.replace(punct, \"\")\n cer = round(word_error_rate([transcript], 
[text_clean], use_cer=True) * 100, 2)\n normalized_options.append((text, cer))\n return normalized_options", "def calculate_cer(normalized_texts: List[str], transcript: str, remove_punct=False) -> List[Tuple[str, float]]:\n normalized_options = []\n for text in normalized_texts:\n text_clean = text.replace('-', ' ').lower().strip()\n if remove_punct:\n for punct in \"!?:;,.-()*+-/<=>@^_\":\n text_clean = text_clean.replace(punct, \" \")\n text_clean = re.sub(r' +', ' ', text_clean)\n cer = round(word_error_rate([transcript], [text_clean], use_cer=True) * 100, 2)\n normalized_options.append((text, cer))\n return normalized_options", "def deCopIfy(text):\n\tif text == \"\":\n\t\treturn text\n\n\tfor lingo in coplingo:\n\t\ttext = re.sub(lingo['regex'], lingo['str'], text)\n\n\treturn text[0].upper() + text[1:]", "def auto_correct(cls, input_string):\n\n # input_string = cls.filter_input(input_string)\n\n possible_corrections = dict()\n output = []\n\n for token in input_string.strip().split(\" \"):\n # print(token)\n for category, words in cls.correction_dict.items():\n possible_corrections[category] = difflib.get_close_matches(token.title(), words, 1, cls.cutoff)\n\n # print(\"Possible:\", flatten([i for i in possible_corrections.values()]))\n\n corrected = difflib.get_close_matches(token.title(), flatten([i for i in possible_corrections.values()]), 1,\n cls.cutoff)\n if corrected:\n output.append(corrected[0])\n\n return ' '.join(output)", "def lf_abnormal_interp_with_discont(report):\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_with_discont(interp_text)\n elif 'summary' in report.sections:\n return abnormal_interp_with_discont(report.sections['summary']['text'])\n elif 'findings' in report.sections: # fall back to look in the findings \n if 'summary' in report.sections['findings']: # fall back to look for a summary instead\n return abnormal_interp_with_discont(report.sections['findings']['summary'])\n if 'impression' in report.sections['findings']:\n return abnormal_interp_with_discont(report.sections['findings']['impression']) \n return ABSTAIN_VAL\n elif 'narrative' in report.sections: # fall back to look in the findings \n ky = 'narrative'\n if 'summary' in report.sections[ky]: # fall back to look for a summary instead\n return abnormal_interp_with_discont(report.sections[ky]['summary'])\n if 'impression' in report.sections[ky]:\n return abnormal_interp_with_discont(report.sections[ky]['impression']) \n return ABSTAIN_VAL \n else:\n return ABSTAIN_VAL", "def select_best_match(\n self, normalized_texts: List[str], transcript: str, verbose: bool = False, remove_punct: bool = False\n ):\n normalized_texts = calculate_cer(normalized_texts, transcript, remove_punct)\n normalized_texts = sorted(normalized_texts, key=lambda x: x[1])\n normalized_text, cer = normalized_texts[0]\n\n if verbose:\n print('-' * 30)\n for option in normalized_texts:\n print(option)\n print('-' * 30)\n return normalized_text, cer", "def intent_of_text_LnDOR(ChapterTextS, TargetQuestionsD, TestS, StopWords):\n \n # Chapter Text - stokenize\n StokensCT = stokenize(ChapterTextS, StopWords) \n\n # Test question - stokenize\n StokensTest = stokenize(TestS, StopWords)\n\n # Knowledge Base Dict - stokenize\n KBD_structure = stokenizeKBD(TargetQuestionsD, StopWords)\n\n # List (because list is mutable, set is not) of all stokens in document\n StokensDoc = StokensCT[:] # from chapter text\n 
StokensDoc.extend(StokensTest[:]) # += Test string\n\n # extend list of stokens in Doc\n for i in TargetQuestionsD:\n StokensDoc.extend(TargetQuestionsD[i][\"mq stokens\"][:]) # += KB target [matched Q]s\n StokensDoc.extend(TargetQuestionsD[i][\"ans stokens\"][:]) # += KB answers\n \n StokensTestV = set(StokensTest)\n StokensDocV = set(StokensDoc)\n StokensAntiTgtV = StokensDocV\n \n # Complement of all targets\n for i in TargetQuestionsD:\n StokensAntiTgtV = StokensAntiTgtV.difference(set(TargetQuestionsD[i][\"mq stokens\"]))\n \n # calculate confusion matrix and DOR etc.\n LnDORD = {}\n # Anti Target\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensAntiTgtV, StokensTestV) \n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN) \n \n LnDORD[\"AntiTgt\"] = {'lndor': LnDOR, 'theta': someAngle}\n\n # total occurences\n total_occ = 0\n for i in TargetQuestionsD:\n total_occ += TargetQuestionsD[i]['count']\n\n for i in TargetQuestionsD:\n StokensTgtV = set(TargetQuestionsD[i][\"mq stokens\"][:])\n\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensTgtV, StokensTestV) \n priorOR = TargetQuestionsD[i]['count'] / total_occ\n\n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN, priorOR) \n \n LnDORD[i] = {'lndor': LnDOR, 'theta': someAngle}\n # LnDORD = {i: {'lndor': , 'theta': }}, KB indices + \"AntiTgt\"\n\n return LnDORD", "def hasConstantForm(self, sentence):", "def segue_canned_texts(segue, kind, excecute_code=True):\n\n def enrich_with_phrases(segue, kind):\n \"\"\"Retrieves the two phrases associated to the two nodes composing the segue\n The two phrases are added to the segue as two new keys, to be exploited in short, line and description texts.\n\n Args:\n segue (d)\n kind (str): whether short, line or description\n\n Returns:\n d: segue dictionary enriched with phrases\n \"\"\"\n phrase_n1 = phrase(segue['n1'], segue['compare_function'], kind)\n phrase_n2 = phrase(segue['n2'], segue['compare_function'], kind)\n if phrase_n1 is not None:\n segue = {**{'phrase_n1': phrase_n1(segue['n1'], segue)}, **segue}\n if phrase_n2 is not None:\n segue = {**{'phrase_n2': phrase_n2(segue['n2'], segue)}, **segue}\n return segue\n\n assert kind in ['line', 'description', 'short']\n\n if kind == 'description':\n text = search_dictionary_dicothomic(_description_dichotomic, segue)\n\n elif kind == 'line':\n text = search_dictionary_atomic(_line_atomic, segue)\n text = search_dictionary_dicothomic(_line_dichotomic, segue) if text is None else text\n\n elif kind == 'short':\n text = search_dictionary_atomic(_short_atomic, segue)\n text = search_dictionary_dicothomic(_short_dichotomic, segue) if text is None else text\n if text is None:\n try:\n text = segue_canned_texts(segue, 'line', excecute_code=False)\n except KeyError:\n pass\n\n if text is not None:\n\n if excecute_code:\n segue = enrich_with_phrases(segue, kind)\n\n if type(text) == tuple:\n # dicothomic\n text = f\"{text[0](segue['n1'], segue['n2'], segue)} {text[1](segue['n1'], segue['n2'], segue)}\"\n else:\n # atomic\n text = text(segue['n1'], segue['n2'], segue)\n\n # Postprocessing\n # Fix multiple spaces\n text = ' '.join(text.split())\n # First letter is capital\n text = capitalize_first_word(text)\n # Fix spaces among word and 's in genitivo sassone\n text = re.sub(r\"([a-zA-Z]+)\\s+('s)\", r\"\\1\\2\", text)\n text = text.replace(', ,', ',')\n\n return text\n else:\n raise KeyError(\n f\"It was not possible to find a {kind} for node of types, respectively {segue['n1']['type']} and 
{segue['n2']['type']}, with compare function {segue['compare_function']}. The ids of the nodes are, respectively {segue['n1']['id']} and {segue['n2']['id']}\")", "def __citation_correction(self, bs, ground_truth):\n bs_ref = bs.findNext('bibl')\n gt_ref = ground_truth.findNext('ref')\n while gt_ref is not None:\n if gt_ref.find('article-title') != bs_ref.title:\n pass\n gt_ref = gt_ref.findNext('ref')", "def recept(self, text, *args, **kwargs):\n results = []\n for each_cur in self.flat_norm.keys():\n if each_cur.lower() in text.lower():\n results.append(self.flat_norm[each_cur])\n\n if results:\n # we have something in results\n print(\"DictionarySlotReceptorMixin.recept: %s grasped results: %s\" % (self, results))\n # TODO make productions signals?\n return results", "def is_consonant(text):\n return text.lower() in AVRO_CONSONANTS", "def extrairFrase(self, documento):\n unicWords = self.unicWords()\n doc = set(documento)\n caracteristicas ={}\n for palavras in unicWords:\n caracteristicas['%s'%palavras]=(palavras in doc)\n return caracteristicas", "def verify_decrypt_key(self):\r\n\t\tpercent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif percent_english > 50:\r\n\t\t\tself.right_key = False\r\n\t\t#If the key does not pass, the program will give you a warning and prompt you for another key. \r\n\t\telse: \r\n\t\t\tprint(f\"After decryption, it looks like only {percent_english}% of your words are english, you may have entered the wrong key?\")", "def context_spell_correct(\n self,\n sentence: str,\n suggestions_dict: Dict[str, List[str]]\n ) -> (str, Dict[str, str]):\n sentence_tokens = sentence.lower().split()\n possible_sent_list = []\n for each in sentence_tokens:\n if each in suggestions_dict:\n possible_sent_list.append(suggestions_dict[each])\n else:\n possible_sent_list.append([each])\n\n corrected_sent = self.get_most_probable_sentence(possible_sent_list)\n corrected_dict = self.get_corrected_words_map(corrected_sent, sentence)\n context_corrected_sentence = \" \" + sentence + \" \"\n for token, suggestion in corrected_dict.items():\n context_corrected_sentence = re.sub(\" \" + re.escape(token) + \" \",\n \" \" + suggestion + \" \",\n context_corrected_sentence)\n context_corrected_sentence = context_corrected_sentence.strip()\n return context_corrected_sentence, corrected_dict", "def conjugate_present_are_verb(verb, pronoun, tense):\n\n are_endings = {\"io\": \"o\", \"tu\": \"i\", \"lui\": \"a\", \"lei\": \"a\", \"noi\": \"iamo\", \"voi\": \"ate\", \"loro\": \"ano\"}\n giare_endings = {\"io\": \"io\", \"tu\": \"i\", \"lui\": \"ia\", \"lei\": \"ia\", \"noi\": \"iamo\", \"voi\": \"iate\", \"loro\": \"iano\"}\n ciare_endings = {\"io\": \"o\", \"tu\": \"\", \"lui\": \"a\", \"lei\": \"a\", \"noi\": \"amo\", \"voi\": \"ate\", \"loro\": \"ano\"}\n add_h = {\"io\": \"o\", \"tu\": \"hi\", \"lui\": \"a\", \"lei\": \"a\", \"noi\": \"hiamo\", \"voi\": \"ate\", \"loro\": \"ano\"}\n irregular_are = [\"fare\", \"andare\"]\n fare = {\"io\": \"faccio\", \"tu\": \"fai\", \"lei\": \"fa\", \"lui\": \"fa\", \"noi\": \"facciamo\", \"voi\": \"fate\", \"loro\": \"fanno\"}\n andare = {\"io\": \"vado\", \"tu\": \"vai\", \"lui\": \"va\", \"lei\": \"va\", \"noi\": \"andiamo\", \"voi\": \"andate\", \"loro\": \"vanno\"}\n\n # this section checks for the irregular verbs fare, andare\n if verb in irregular_are:\n if verb == \"fare\":\n return fare[pronoun]\n else:\n return andare[pronoun]\n\n # this section checks for spelling 
issues like with mancare in order to preserve hard \"k\" sound of infinitive\n # if it's a verb like mancare then the if section adds an \"h\" for the spelling to preserve hard \"k\" sound\n # if it's a normal -are verb, then the else section conjugates it normally\n if verb[-5:] == \"giare\":\n stripped_verb = strip_off_ending(verb, tense)\n new_verb = stripped_verb + giare_endings[pronoun]\n return new_verb\n if verb[-5:] == \"ciare\":\n stripped_verb = strip_off_ending(verb, tense)\n new_verb = stripped_verb + ciare_endings[pronoun]\n return new_verb\n if verb[-4:] == \"care\":\n stripped_verb = strip_off_ending(verb, tense)\n new_verb = stripped_verb + add_h[pronoun]\n return new_verb\n if verb[-4:] == \"gare\":\n stripped_verb = strip_off_ending(verb, tense)\n new_verb = stripped_verb + add_h[pronoun]\n return new_verb\n else:\n stripped_verb = strip_off_ending(verb, tense)\n new_verb = stripped_verb + are_endings[pronoun]\n return new_verb", "def applyCoder(text, coder):\n ciphertext = str()\n #for each letter in the text find it, and grab shifted letter\n for letter in text:\n ciphertext += coder.get(letter, letter)\n return ciphertext", "def editex(str1, str2, min_threshold = None):\n\n # Quick check if the strings are empty or the same - - - - - - - - - - - - -\n #\n if (str1 == '') or (str2 == ''):\n return 0.0\n elif (str1 == str2):\n return 1.0\n\n n = len(str1)\n m = len(str2)\n\n # Values for edit costs - - - - - - - - - - - - - - - - - - - - - - - - - - -\n #\n BIG_COSTS = 2 # If characters are not in same group\n SML_COSTS = 1 # If characters are in same group\n\n # Mappings of letters into groups - - - - - - - - - - - - - - - - - - - - - -\n #\n groupsof_dict = {'a':0, 'b':1, 'c':2, 'd':3, 'e':0, 'f':1, 'g':2, 'h':7,\n 'i':0, 'j':2, 'k':2, 'l':4, 'm':5, 'n':5, 'o':0, 'p':1,\n 'q':2, 'r':6, 's':2, 't':3, 'u':0, 'v':1, 'w':7, 'x':2,\n 'y':0, 'z':2, '{':7}\n\n # Function to calculate cost of a deletion - - - - - - - - - - - - - - - - -\n #\n def delcost(char1, char2, groupsof_dict):\n\n if (char1 == char2):\n return 0\n\n code1 = groupsof_dict.get(char1,-1) # -1 is not a char\n code2 = groupsof_dict.get(char2,-2) # -2 if not a char\n\n if (code1 == code2) or (code2 == 7): # Same or silent\n return SML_COSTS # Small difference costs\n else:\n return BIG_COSTS\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n if (' ' in str1):\n str1 = str1.replace(' ','{')\n if (' ' in str2):\n str2 = str2.replace(' ','{')\n\n if (n > m): # Make sure n <= m, to use O(min(n,m)) space\n str1, str2 = str2, str1\n n, m = m, n\n\n row = [0]*(m+1) # Generate empty cost matrix\n F = []\n for i in range(n+1):\n F.append(row[:])\n\n F[1][0] = BIG_COSTS # Initialise first row and first column of cost matrix\n F[0][1] = BIG_COSTS\n\n sum = BIG_COSTS\n for i in range(2,n+1):\n sum += delcost(str1[i-2], str1[i-1], groupsof_dict)\n F[i][0] = sum\n\n sum = BIG_COSTS\n for j in range(2,m+1):\n sum += delcost(str2[j-2], str2[j-1], groupsof_dict)\n F[0][j] = sum\n\n for i in range(1,n+1):\n\n if (i == 1):\n inc1 = BIG_COSTS\n else:\n inc1 = delcost(str1[i-2], str1[i-1], groupsof_dict)\n\n for j in range(1,m+1):\n if (j == 1):\n inc2 = BIG_COSTS\n else:\n inc2 = delcost(str2[j-2], str2[j-1], groupsof_dict)\n\n if (str1[i-1] == str2[j-1]):\n diag = 0\n else:\n code1 = groupsof_dict.get(str1[i-1],-1) # -1 is not a char\n code2 = groupsof_dict.get(str2[j-1],-2) # -2 if not a char\n\n if (code1 == code2): # Same phonetic group\n diag = SML_COSTS\n else:\n diag = BIG_COSTS\n\n 
F[i][j] = min(F[i-1][j]+inc1, F[i][j-1]+inc2, F[i-1][j-1]+diag)\n\n w = float(F[n][m])\n\n if (w < 0.0):\n w = 0.0\n return w", "def spell_correction(self, tweet):\n return self.spell_correct.correct(tweet)", "def negation_check(self,sentence):", "def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score", "def remove_non_narration_strings(transcription_row):\n sentence = transcription_row[\"text\"]\n # filter out (CAPITALIZED WORD) and \"CAPITALIZED WORD\". These are not enunciated in the voiceover, but rather\n # indicate noise/words from the original audio track that get interspersed into the voice\n # Might contain special characters\n # Update: Capitalization etc are inconsistent. But all follow the pattern \"text\" and (text). Remove these instead\n crosstalk_pattern = '\\(.*?\\)|\\\".*?\\\"'\n # crosstalk_findings = re.findall(crosstalk_pattern, sentence)\n # print(\"Crosstalk: \"+str(crosstalk_findings))\n sentence = re.sub(crosstalk_pattern, \" \", sentence)\n # filter out ' s ' ' Ss ' etc\n s_pattern = r'\\b[sS]+\\b'\n s_pattern_findings = re.findall(s_pattern, sentence)\n # if len(s_pattern_findings) > 0:\n # print(\"S-pattern: \"+str(s_pattern_findings))\n sentence = re.sub(s_pattern, \" \", sentence)\n transcription_row[\"text\"] = sentence\n return transcription_row", "def analyze(self, text):\n \n total_words = len(text)\n \n negatives_length = len(self.negatives)\n positives_length = len(self.positives)\n \n posneg_sum = 0\n \n for word in text:\n \n for j in range(0, positives_length):\n if word == self.positives[j][:-1]:\n posneg_sum += 1\n \n for k in range(0, negatives_length):\n if word == self.negatives[k][:-1]:\n posneg_sum -= 1\n\n return posneg_sum", "def decryptStory():\n wordList = loadWords()\n text = getStoryString() \n k = findBestShift(wordList, text)\n \n return applyShift(text, k)", "def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = clean_number(text)\n text = decontracted(text)\n text = correct_spelling(text)\n text = spacing_punctuation(text)\n text = spacing_some_connect_words(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n text = text.lower()\n return text", "def get_text(self):\n text_complet = \"\"\n rez_dict = self.__results\n for i in range(0, len(rez_dict[\"text\"])):\n text = rez_dict[\"text\"][i]\n conf = int(rez_dict[\"conf\"][i])\n if conf > self.__min_confidence:\n text_complet += text + \" \"\n return text_complet", "def processClauseText(intext, mtype): # type: (str, str) -> []\n\n global classifications, accsearch, unaccsearch\n\n retlist = []\n texts = []\n if mtype == 'text' or mtype == 'txt':\n texts = extractClauses(intext)\n elif 'pdf' in mtype:\n plaintext = parsepdf(intext)\n texts = extractPDFClauses(plaintext)\n elif 'word' in mtype:\n plaintext = parseword(intext)\n texts = extractClauses(plaintext)\n\n results = predicteula(texts)\n if len(results['prediction']) == 0:\n return retlist\n\n accs = [[win[1] for win in curwins] for curwins in results['windows']]\n probs = [max(curwins) for curwins in accs]\n\n for text, prediction in zip(texts, probs):\n if len(text.split()) > 9:\n curclause = {}\n curclause['origclause'] = text\n 
curclause['classification'] = 'Not Sure'\n if prediction > 0.6:\n curclause['classification'] = 'Acceptable'\n elif prediction < 0.4:\n curclause['classification'] = 'Unacceptable'\n curclause['score'] = prediction\n curclause['accclause'] = dicesearch(text, accsearch)\n curclause['unaccclause'] = dicesearch(text, unaccsearch)\n retlist.append(curclause)\n\n return retlist", "def match_contract_to_charter_constraints(contract, charter, charter_constraints, charity_constraints):\n\n r_quotes = []\n r_vector = []\n\n quote_slice = slice(0, 17)\n\n if 'subj' not in contract.sections:\n raise ValueError(\"contract has no subject section\")\n\n subj = contract.sections['subj'].body\n print(subj.untokenize_cc())\n print('------')\n if subj.embeddings is None:\n print(\"Subj embeddings are gone, restoring...\")\n subj.embeddings = contract.embeddings[subj.start:subj.end]\n # subj.tokens = doc.tokens[subj.start:subj.end]\n # subj.tokens_cc = doc.tokens_cc[subj.start:subj.end]\n # subj.embedd( GLOBALS__['CharterAnlysingContext'].pattern_factory )\n print('\\t\\t sample:', subj.embeddings[0][1:10])\n\n for head_type in charter_constraints:\n\n ##charity:\n if head_type in charity_constraints:\n print(f'{head_type} has charity constrinats')\n \n charity_constraints_by_head = charity_constraints[head_type]\n charity_constraints_by_head_new = []\n \n charity_constraints['new.'+head_type] = charity_constraints_by_head_new\n \n for i in range(len(charity_constraints_by_head)):\n _tuple = charity_constraints_by_head[i] \n# for cc in charity_constraints[head_type]:\n _slice = _tuple[0]\n emb_charter = charter.sections[head_type].body.embeddings[_slice]\n \n distance = 1 - DF(emb_charter, subj.embeddings[5:])\n \n# cc.add['subj_correlation'] = distance\n \n# detupling\n charity_constraints_by_head_new.append ( {\n 'slice':_slice,\n 'subj_correlation': distance,\n 'confidence': _tuple[1],\n 'sum': _tuple[2]\n })\n \n print('\\t'*4, 'cc=', charity_constraints_by_head_new[i])\n \n # print('\\t\\t---CC', cc[0])\n \n\n # GLOBALS__['CharterAnlysingContext'].doc.sections['head.directors'].body.embeddings[_slice]\n\n ##------------------------charity end\n print(f'measuring {head_type} constraints...'.upper())\n cc = charter_constraints[head_type]\n quotes = cc['sentences']\n for quote in quotes:\n print()\n _q = untokenize(quote['subdoc'].tokens_cc[quote_slice])\n print(_q)\n\n distance = 1 - DF(quote['subdoc'].embeddings[quote_slice],\n subj.embeddings[5:])\n\n quote['subj_correlation'] = distance\n\n print(f'distance = {distance:.4f}')\n\n r_quotes.append(_q)\n r_vector.append(distance)\n r_quotes.append('\\n')\n r_vector.append(distance)\n\n GLOBALS__['renderer'].render_color_text(r_quotes, r_vector)\n print(r_vector)\n print(r_quotes)", "def lf_normal_interp_not_seizure(report):\n # print(report)\n\n for keyinterp in CANDIDATE_INTERPS_LOWER:\n if keyinterp in report.sections.keys():\n interpretation = report.sections[keyinterp]\n if isinstance(interpretation, dict):\n interp_text = interpretation['text']\n else:\n interp_text = interpretation\n \n if SIMPLE_NORMAL_RE.search(interp_text):\n if NORMAL_STUDY_PHRASES.search(interp_text):\n #return NO_SEIZURE_VAL\n return OTHERS_VAL\n #return NA_VAL\n else:\n logger.info(f'warning did not get second normal match: {interp_text}')\n return ABSTAIN_VAL\n \n else:\n return ABSTAIN_VAL\n\n return ABSTAIN_VAL", "def decryptStory():\n wordList = loadWords()\n text= getStoryString()\n \n shift = findBestShift(wordList, text)\n return applyShift(text, shift)", "def 
analyze(self, text):\n #analize every word in the text a value -1, 1 or 0 and calculate total score\n #tokens allow us to split words in single tokens we can initialize tokens like this:\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text.lower())\n\n score = 0\n\n if tokens[0] in self.negatives:\n score =- 1\n elif tokens[0] in self.positives:\n score =+ 1\n else:\n score = 0\n\n #print('', text)\n\n return score", "def censor(text: str) -> str:\n\n # Split up individual words in the text\n tokens: List[str] = text.split(\" \")\n\n # Create a mapping of 0 if the word is okay, 1 if it should be censored\n censor_mask: List[int] = predict([word for word in tokens])\n\n # A list of tuples with the first element being the word and the second being 0 or 1\n censor_map: List[Tuple[str, int]] = list(zip(tokens, censor_mask))\n\n # A list of the words that make up the censored text\n censored_text: List[str] = [\n censor_word(word) if should_censor else word\n for word, should_censor in censor_map\n ]\n\n return \" \".join(censored_text)", "def analyze(self, text):\n\n # start from 0 for each Analyser variable\n self.positives = 0\n self.negatives = 0\n\n # precise self text value\n self.text = text\n\n # declare a tokenased word\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n\n # indicate the length of list tokens\n size = len(tokens)\n\n # all the word stuff to ckeck\n for word in tokens:\n\n # chaque mots est converti en mot sans majuscule\n word = str.lower(word)\n\n linespos = [line.rstrip('\\n') for line in open('positive-words.txt')]\n linesneg = [line.rstrip('\\n') for line in open('negative-words.txt')]\n\n # check for positive or negative or neutral words\n if word in linespos:\n self.positives += 1\n elif word in linesneg:\n self.negatives += 1\n else:\n continue\n\n # score calculculated and reurned\n score = self.positives - self.negatives\n\n return score", "def check(self, text):\n p = self.d\n i = 0\n j = 0\n result = []\n ln = len(text)\n while i + j < ln:\n t = text[i + j].lower()\n # print i,j,hex(ord(t))\n if not (t in p):\n j = 0\n i += 1\n p = self.d\n continue\n p = p[t]\n j += 1\n # print p,i,j\n if chr(11) in p:\n p = self.d\n result.append(text[i:i + j])\n i = i + j\n j = 0\n return result", "def analyse(self):\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]", "def getCasing(word):\n casing = 'other'\n \n numDigits = 0\n for char in word:\n if char.isdigit():\n numDigits += 1\n \n digitFraction = numDigits / float(len(word))\n \n if word.isdigit(): #Is a digit\n casing = 'numeric'\n elif digitFraction > 0.5:\n casing = 'mainly_numeric'\n elif word.islower(): #All lower case\n casing = 'allLower'\n elif word.isupper(): #All upper case\n casing = 'allUpper'\n elif word[0].isupper(): #is a title, initial char upper, then all lower\n casing = 'initialUpper'\n elif numDigits 
> 0:\n casing = 'contains_digit'\n \n return casing", "def classify(self, sText):\n\n sum1, sum2 = self.count()\n\n #len1 = len(self.posRev)\n #len2 = len(self.negRev)\n\n probPos = 0 #math.log(float(sum1)/(sum1+sum2))\n probNeg = 0 #math.log(float(sum2)/(sum1+sum2))\n\n ls = self.tokenize(sText)\n\n #test Positive case\n for word in ls:\n prob = float(self.posRev.get(word, 0) + 1)/(sum1)\n if prob != 0:\n probPos += math.log(prob)\n\n #test Negative case\n for word in ls:\n prob = float(self.negRev.get(word, 0) + 1)/(sum2)\n if prob != 0:\n probNeg += math.log(prob)\n\n print probPos\n print probNeg\n\n print probPos-probNeg\n if (probPos - probNeg) > 1:\n return \"positive\"\n elif (probNeg - probPos) > 1:\n return \"negative\"\n else:\n return \"neutral\"", "def verify_text(self, text):\n pass", "def process(self, message: Message, **kwargs: Any) -> None:\n\n try:\n textdata = message.data[\"text\"]\n # print(\"text :::\" + textdata)\n textdata = textdata.split()\n new_message = \" \".join(spell.correction(w) for w in textdata)\n # print(\"after correction text :::\" + new_message)\n message.data[\"text\"] = new_message\n except KeyError:\n pass", "def get_accredit_info(self, accredit_dict, library_construction, proj_name):\n accredit_info = {}\n for key in accredit_dict:\n accredit = accredit_dict[key]\n ## For \"finished library\" projects, set certain accredation steps as \"NA\" even if not set by default\n if key in ['library_preparation','data_analysis'] and library_construction == 'Library was prepared by user.':\n accredit_info[key] = 'Not Applicable'\n elif accredit in ['Yes','No']:\n accredit_info[key] = '{} under ISO/IEC 17025'.format(['[cross] Not accredited','[tick] Accredited'][accredit == 'Yes'])\n elif accredit == 'N/A':\n accredit_info[key] = 'Not Applicable'\n else:\n self.LOG.error('Accreditation step {} for project {} is found, but no value is set'.format(key, proj_name))\n return accredit_info", "def remove_contractions(data: pd.Series) -> pd.Series:\n data_ = data.copy()\n with open(CONTRACTIONS, 'rb') as f:\n contractions = pickle.load(f)\n for kind in contractions.keys():\n words = contractions[kind]\n for word in words:\n word_no_backslash = word.replace('\\\\','')\n try:\n data_ = data_.str.replace(r'\\b{}\\b'.format(word), words[word_no_backslash])\n except:\n # for contractions with escape characters\n data_ = data_.str.replace(f'{word_no_backslash}', words[word])\n return data_", "def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score", "def isccp4keytext(self, text):\n #\n # See e.g. http://www.ccp4.ac.uk/dist/html/loggraphformat.html\n # for format of TEXT information, but essentially it's:\n #\n # $TEXT :text name: $$ junk (ignored) text $$any text characters$$\n #\n keytext = self.compile(\n \"isccp4keytext\", r\"\\$TEXT[ \\n]*:([^:]*):[ \\n]*\\$\\$([^\\$]*)\\$\\$([^\\$]*)\\$\\$\"\n ).search(text)\n result = dict()\n if keytext:\n result[\"name\"] = keytext.group(1)\n result[\"junk_text\"] = keytext.group(2)\n result[\"message\"] = keytext.group(3)\n result[\"nlines\"] = keytext.group(0).count(\"\\n\")\n return result", "def phrase_dict(phrase):\n switcher = {\n '처음으로': '닥앤미 병원을 찾아주셔서 감사합니다. 직접문의원할시 오른쪽 아래 1:1 버튼을 눌러주시면 직접 상담 가능합니다. 
1:1 상담 가능 시간은 09시 – 18시 입니다.',\n '병원 정보': '어떤 정보를 보시고 싶으신가요?',\n '병원 위치': '“닥앤미 병원 주소는 서울시 용산구 이촌동 세움상가 2층입니다.” 더 자세한 지도확인을 원하실 경우 아래 버튼을 눌러주세요',\n '병원 운영시간': '닥앤미 병원을 찾아주셔서 감사합니다. 병원 운영시간은 위의 내용과 같습니다',\n '병원 프로모션': '현재 진행되고 있는 병원 프로모션입니다. 자세히 보길 원하시면 아래의 프로모션을 선택해 주세요',\n '프로모션 A': '닥앤미에서 6월 30일까지 제공되는 프로모션 A 입니다.',\n '프로모션 B': '닥앤미에서 6월 30일까지 제공되는 프로모션 B 입니다.',\n '프로모션 C': '닥앤미에서 6월 30일까지 제공되는 프로모션 C 입니다.',\n '의료진': '안녕하세요, 닥앤미의 홍길동 전문의 입니다. 항상 최선을 다하겠습니다.',\n '병원 사진': '최고의 진료를 제공하는 닥앤미 병원입니다.',\n '병원 진료과목': '닥앤미 병원의 진료과목입니다.',\n '병원 전화하기': '닥앤미 병원 전화번호는 02 3522 XXXX 입니다. 지금 통화를 원하시면 아래 버튼을 눌러주세요'\n }\n default_text = 'Unable to find appropriate text response'\n return switcher.get(phrase, default_text)", "def get_correct(entry):\n return entry['correct'] == \"1.0\"", "def spelling(p_str):\n spell = SpellChecker(language='en')\n misspelled = spell.unknown(re.sub(r'[^\\w\\s]', '', p_str).split())\n corrections = {}\n for word in misspelled:\n tmp = list(spell.candidates(word))\n tmp.insert(0, spell.correction(word))\n corrections[word] = tmp\n return(corrections)", "def _separate_possessives(text: str) -> (dict, str):\n possessive_dict = dict() # Dictionary of nouns (keys) with their possessive modifiers (values)\n revised_words = []\n if '/poss/' in text:\n space_splits = text.split()\n for index in range(0, len(space_splits)):\n if '/poss/' in space_splits[index]:\n possessive_dict[space_splits[index + 1]] = space_splits[index].replace('/poss/', empty_string)\n else:\n revised_words.append(space_splits[index])\n return possessive_dict, space.join(revised_words)\n return possessive_dict, text", "def beginning_checker(self, translit):\n tr_new = re.sub(r'(\\A|·)夫', r'\\1弗', translit)\n tr_new = re.sub(r'(\\A|·)耶', r'\\1叶', tr_new)\n return tr_new", "def text_analise(file_n, letter, title=''):\r\n dict_percent = {}\r\n other_signs2 = {}\r\n other_signs2_turn = {}\r\n\r\n for i in letter:\r\n with open(file_n) as f:\r\n text = f.read()\r\n count = 0\r\n count_others = 0\r\n for ch in text.lower():\r\n if ch == i:\r\n count += 1\r\n if ch not in letter:\r\n count_others += 1\r\n other_signs2[ch] = count_others\r\n # if ch not in other_signs:\r\n # other_signs.append(ch)\r\n procent = round((100 * count) / len(text), 2)\r\n procent_other = round((100 * count_others) / len(text), 2)\r\n\r\n dict_percent[procent] = [i, count]\r\n\r\n x = 0\r\n for key in sorted(dict_percent):\r\n print('Percent {} of letter {}'.format(key, dict_percent[key]))\r\n x += key\r\n with open('3.txt', 'a') as f:\r\n f.write(\r\n 'Percent {} of letter {} \\n'.format(key, dict_percent[key]))\r\n\r\n other_percents = round((100 - x), 2)\r\n print('Other: {} percent'.format(other_percents))\r\n print('ACTUAL Other: {} percent'.format(procent_other))\r\n print('Length of text: {}\\n'.format(len(text)))\r\n # print(list(set(other_signs)))\r\n\r\n for a, b in other_signs2.items():\r\n if a != '\\n':\r\n other_signs2_turn[b] = a\r\n elif a == '\\n':\r\n other_signs2_turn[b] = 'Enter'\r\n\r\n with open('3.txt', 'a') as f:\r\n f.write('Other: {} percent\\n'.format(round((100 - x), 2)))\r\n f.write('процентов учтено: {}\\n'.format(x))\r\n f.write('Length of text: {}\\n'.format(len(text)))\r\n f.write('Title of text: {}\\n\\n'.format(title))\r\n for key in sorted(other_signs2_turn):\r\n prnt = ('Letter {} used {} times'.format(\r\n other_signs2_turn[key], key))\r\n prnt_wr = prnt + '\\n'\r\n print(prnt)\r\n f.write(prnt_wr)", "def init_correction_map(token, dictionary):\n if token is None:\n return None\n\n if len(token) <= 2 or 
token.lower() in dictionary:\n return {token: 1}\n\n return {}", "def calculate_construction(self, word):\r\n \r\n construction = \"\"\r\n for c in word.lower():\r\n if c in self.vowels:\r\n construction += \"v\"\r\n elif c in letters:\r\n construction += \"c\"\r\n return construction", "def check_sentences(text, threshold=80, print_only=False):\n non_white_text = re.sub(masks, \"\", re.sub(emojis, \"\", re.sub(punctuation, \"\", re.sub(\"\\s\", \"\", text))))\n num_chars = len(non_white_text)\n num_non_swiss_chars = 0\n for char in non_white_text:\n if char not in swiss_chars:\n num_non_swiss_chars += 1\n ratio = num_non_swiss_chars / num_chars * 100 if num_chars != 0 else 0\n if ratio > threshold:\n return \"POSSIBLE NON_SWISS GERMAN TEXT:\" + text if print_only else False\n else:\n return text", "def transcribe(dna):\n str = ''\n dict = {'C': 'C', 'G': 'G', 'A': 'A', 'T': 'U'}\n for char in dna:\n if char == 'C' or char == 'G' or char == 'T' or char == 'A':\n #converting only of the valid string is encountered\n #then the string is converted accordingly\n str = str + dict[char]\n #the case for incalid string, it throws only the error\n else :\n str = 'invalid character entered, please check the input'\n break\n return str", "def passion_analyzer(text):\n\n\tlower_text = text.lower()\n\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tpassion_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tpassion_index += (lower_text.count(positive_words[x]))**2\n\tfor x in range(len(negative_words)):\n\t\tpassion_index -= (lower_text.count(negative_words[x]))**2\n\tif '!' in text:\n\t\tpassion_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tpassion_index *= hashtag_scaling * lower_text.count('#') + 1\n\tpassion_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\n\n\t\t\n\treturn math.sqrt(passion_index)", "def cleanText(text):\n try:\n text = str(text)\n\n # remove contactions and stop words\n text = contractions(text)\n # remove html entities\n cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n new_text = cleanr.sub('', text.strip())\n return re.sub(r'\\s+', ' ', re.sub(r'\\W+', \" \", new_text))\n # TAG_RE = re.compile(r'<[^>]+>')\n except:\n print(\"An exception occurred with: \" + text)\n return str(text)", "def test_return_advice_under_cruising_weight(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"55\", \"weight_goal\": \"51\"}\n return_advice = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[1]\n\n advice = \"Chaque personne a un poids d'équilibre sur lequel il peut rester longtemps, \" \\\n \"c'est se qu'on appelle le poids de croisière. Il semble que ton objectif \" \\\n \"aille en dessous de ce poids. Je tiens donc à te préciser qu'il est \" \\\n \"possible que tu n'arrives pas à le maintenir sur la durée. \" \\\n \"Je note tout de même cet objectif. 
\"\n self.assertEqual(return_advice, advice)", "def analyze(self, text):\n\n tknzr = nltk.tokenize.TweetTokenizer()\n words = tknzr.tokenize(text)\n \n score = 0\n \n for word in words:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n else:\n continue\n \n return score", "def compute_readability(text):\n total_words = 0\n total_sentences = 0\n total_syllables = 0\n score = 0\n\n words = text.split()\n total_words = len(text.split()) \n total_sentences = count_sentences(text)\n total_syllables = count_syllables(words)\n \n score = 206.835 - 1.015 * ( total_words / total_sentences) - 84.6 * (total_syllables / total_words)\n if score > 90.00:\n answer = 'Texto de nível do 5º ano do Ensino Fundamental, facilmente compreendido por um aluno de 11 anos.'\n elif score <= 90.00 and score > 80.00:\n answer = 'Texto de nível do 6º ano do Ensino Fundamental, inglês coloquial para consumidores.'\n elif score <= 80.00 and score > 70.00:\n answer = 'Texto de nível do 7º ano do Ensino Fundamental, razoavelmente fácil de ler.'\n elif score <= 70.00 and score > 60.00:\n answer = 'Texto de nível do 9º ano do Ensino Fundamental, Inglês simples compreendido por adolescentes de 13 - 15 anos.'\n elif score <= 60.00 and score > 50.00:\n answer = 'Texto de 1º a 3º ano do Ensino Médio, razoavelmente difícil de ler.'\n elif score <= 50.00 and score > 30.00:\n answer = 'Texto de nível Universitário, difícil de ler.'\n else:\n answer = 'Texto de nível de Graduação, muito difícil de ler e mais bem-compreendido por universitários graduados.'\n \n print('Pontuação Total:', score, answer)", "def contraction_expansion_on_corpus(text_corpus):\n\n text_corpus[text_column_name] = text_corpus[\n text_column_name].apply(contraction_expansion)\n return text_corpus", "def canada_query(text):\n return 'canada' in text.lower()", "def analyze(self, text):\n tknzr = nltk.tokenize.casual.TweetTokenizer(preserve_case=True, reduce_len=False, strip_handles=False)\n tknTxt = tknzr.tokenize(text)\n sentiment = 0\n \n for i in range(len(tknTxt)):\n if tknTxt[i] in self.posTxt:\n #print(\"POS\")\n #print(tknTxt[i])\n sentiment += 1\n elif tknTxt[i] in self.negTxt:\n #print(\"NEG\")\n #print(tknTxt[i])\n sentiment -= 1\n \n return sentiment", "def text_process_dict(field, dictionary:dict):\n values = dictionary.get(\"values\")\n if is_valid_text(values):\n is_exact = values.get(\"is_exact\", False)\n _term = values.get(\"term\", False)\n filtered_term = re.sub('[^a-zA-Z0-9\\n\\.|\\*|\\@|\\|\\_]', ' ', _term)\n return text_process(field, filtered_term, is_exact=is_exact)\n return {\n\n }" ]
[ "0.68868476", "0.6857023", "0.68527967", "0.68527967", "0.66898316", "0.6347374", "0.5993866", "0.5918088", "0.5882449", "0.5777897", "0.57183623", "0.5687876", "0.5641633", "0.5621483", "0.5601896", "0.5554486", "0.5458176", "0.5382929", "0.5322964", "0.5307496", "0.5253449", "0.5250009", "0.5248516", "0.5231251", "0.52233654", "0.52152175", "0.5203586", "0.52029675", "0.5199981", "0.5184434", "0.51733065", "0.5170581", "0.5167392", "0.5149423", "0.51478076", "0.5132091", "0.5106502", "0.50916225", "0.5088874", "0.5066728", "0.5063786", "0.5063706", "0.5063431", "0.5047348", "0.5044192", "0.5020982", "0.5014858", "0.5014735", "0.50016844", "0.4986884", "0.49544588", "0.49536797", "0.49524906", "0.495148", "0.4935025", "0.49214697", "0.4918102", "0.4911784", "0.49081188", "0.4904668", "0.4903378", "0.48844442", "0.48794806", "0.48650438", "0.48638597", "0.48599193", "0.48584312", "0.4858104", "0.4842912", "0.4842896", "0.48358744", "0.4831218", "0.48304614", "0.48251155", "0.48171404", "0.4817052", "0.48114228", "0.48064727", "0.48052984", "0.48050895", "0.48022425", "0.48006287", "0.47961688", "0.47931743", "0.47819814", "0.477882", "0.4778772", "0.47766784", "0.4776109", "0.47744313", "0.47708517", "0.47705317", "0.4770378", "0.4768055", "0.47634122", "0.4763165", "0.47622663", "0.4756365", "0.47466785", "0.47427803" ]
0.67498755
4